/*
    Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

/*
			C	MMX	MMX2	3DNow*
isVertDC		Ec	Ec
isVertMinMaxOk		Ec	Ec
doVertLowPass		E		e	e*
doVertDefFilter		Ec	Ec	Ec
isHorizDC		Ec	Ec
isHorizMinMaxOk			a
doHorizLowPass		E		a	a*
doHorizDefFilter	E	ac	ac
deRing

* I don't have a 3DNow! CPU -> it's untested
E = Exact implementation
e = almost exact implementation
a = alternative / approximate impl
c = checked against the other implementations (-vo md5)
*/

/*
TODO:
verify that everything works as it should
reduce the time wasted on the mem transfer
implement dering
implement everything in C at least (done at the moment but ...)
figure out the range of QP (assuming <256 for now)
unroll stuff if instructions depend too much on the prior one
we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
move the YScale thing to the end instead of fixing QP
write a faster and higher quality deblocking filter :)
...

Notes:
*/

/*
Changelog:
0.1.2
	fixed a bug in the horizontal default filter
	3dnow version of the Horizontal & Vertical Lowpass filters
	mmx version of the Horizontal Default filter
	mmx2 & C versions of a simple filter described in a paper from Ramkishor & Karandikar
	added mode flags & quality2mode function
0.1.1
*/
#include <inttypes.h>
#include <stdio.h>
#include "../config.h"
//#undef HAVE_MMX2
//#define HAVE_3DNOW
//#undef HAVE_MMX
#include "postprocess.h"

static uint64_t packedYOffset=	0x0000000000000000LL;
static uint64_t packedYScale=	0x0100010001000100LL;
static uint64_t w05=		0x0005000500050005LL;
static uint64_t w20=		0x0020002000200020LL;
static uint64_t w1400=		0x1400140014001400LL;
static uint64_t bm00000001=	0x00000000000000FFLL;
static uint64_t bm00010000=	0x000000FF00000000LL;
static uint64_t bm00001000=	0x00000000FF000000LL;
static uint64_t bm10000000=	0xFF00000000000000LL;
static uint64_t bm10000001=	0xFF000000000000FFLL;
static uint64_t bm11000011=	0xFFFF00000000FFFFLL;
static uint64_t bm00000011=	0x000000000000FFFFLL;
static uint64_t bm11000000=	0xFFFF000000000000LL;
static uint64_t bm00011000=	0x000000FFFF000000LL;
static uint64_t bm00110011=	0x0000FFFF0000FFFFLL;
static uint64_t bm11001100=	0xFFFF0000FFFF0000LL;
static uint64_t b00=		0x0000000000000000LL;
static uint64_t b02=		0x0202020202020202LL;
static uint64_t b0F=		0x0F0F0F0F0F0F0F0FLL;
static uint64_t bFF=		0xFFFFFFFFFFFFFFFFLL;
static uint64_t b20=		0x2020202020202020LL;
static uint64_t b80=		0x8080808080808080LL;
static uint64_t b7E=		0x7E7E7E7E7E7E7E7ELL;
static uint64_t b7C=		0x7C7C7C7C7C7C7C7CLL;
static uint64_t b3F=		0x3F3F3F3F3F3F3F3FLL;

static uint64_t temp0=0;
static uint64_t temp1=0;
static uint64_t temp2=0;
static uint64_t temp3=0;
static uint64_t temp4=0;
static uint64_t temp5=0;
static uint64_t pQPb=0;
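// pQPb holds the current QP broadcast into all 8 bytes; it is refilled per
// block by the MMX code in postProcess() below.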
static uint8_t tempBlock[16*16];

int hFlatnessThreshold= 56 - 16;
int vFlatnessThreshold= 56 - 16;

// amount of "black" you are willing to lose to get a brightness-corrected picture
double maxClippedThreshold= 0.01;

int maxAllowedY=255;
//FIXME can never make a movie's black brighter (does anyone need that?)
int minAllowedY=0;
static inline long long rdtsc()
{
	long long l;
	asm volatile(	"rdtsc\n\t"
		: "=A" (l)
	);
//	printf("%d\n", int(l/1000));
	return l;
}

static inline void prefetchnta(void *p)
{
	asm volatile(	"prefetchnta (%0)\n\t"
		: : "r" (p)
	);
}

static inline void prefetcht0(void *p)
{
	asm volatile(	"prefetcht0 (%0)\n\t"
		: : "r" (p)
	);
}

static inline void prefetcht1(void *p)
{
	asm volatile(	"prefetcht1 (%0)\n\t"
		: : "r" (p)
	);
}

static inline void prefetcht2(void *p)
{
	asm volatile(	"prefetcht2 (%0)\n\t"
		: : "r" (p)
	);
}
//FIXME? |255-0| = 1 (shouldn't be a problem ...)
/**
 * Check if the middle 8x8 Block in the given 8x10 block is flat
 */
static inline bool isVertDC(uint8_t src[], int stride){
//	return true;
	int numEq= 0;
	src+= stride; // src points to the beginning of the 8x8 Block
#ifdef HAVE_MMX
	asm volatile(
//		"int $3 \n\t"
		"pushl %1 \n\t"
		"movq b7E, %%mm7 \n\t" // mm7 = 0x7E
		"movq b7C, %%mm6 \n\t" // mm6 = 0x7C
		"movq (%1), %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm1 \n\t"
		"psubb %%mm1, %%mm0 \n\t" // mm0 = difference
		"paddb %%mm7, %%mm0 \n\t"
		"pcmpgtb %%mm6, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm2 \n\t"
		"psubb %%mm2, %%mm1 \n\t"
		"paddb %%mm7, %%mm1 \n\t"
		"pcmpgtb %%mm6, %%mm1 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm1 \n\t"
		"psubb %%mm1, %%mm2 \n\t"
		"paddb %%mm7, %%mm2 \n\t"
		"pcmpgtb %%mm6, %%mm2 \n\t"
		"paddb %%mm2, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm2 \n\t"
		"psubb %%mm2, %%mm1 \n\t"
		"paddb %%mm7, %%mm1 \n\t"
		"pcmpgtb %%mm6, %%mm1 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm1 \n\t"
		"psubb %%mm1, %%mm2 \n\t"
		"paddb %%mm7, %%mm2 \n\t"
		"pcmpgtb %%mm6, %%mm2 \n\t"
		"paddb %%mm2, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm2 \n\t"
		"psubb %%mm2, %%mm1 \n\t"
		"paddb %%mm7, %%mm1 \n\t"
		"pcmpgtb %%mm6, %%mm1 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm1 \n\t"
		"psubb %%mm1, %%mm2 \n\t"
		"paddb %%mm7, %%mm2 \n\t"
		"pcmpgtb %%mm6, %%mm2 \n\t"
		"paddb %%mm2, %%mm0 \n\t"
		" \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlw $8, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $16, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $32, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"popl %1 \n\t"
		"movd %%mm0, %0 \n\t"
		: "=r" (numEq)
		: "r" (src), "r" (stride)
	);
//	printf("%d\n", numEq);
	numEq= (256 - (numEq & 0xFF)) &0xFF;

//	int asmEq= numEq;
//	numEq=0;
//	uint8_t *temp= src;
#else
	for(int y=0; y<BLOCK_SIZE-1; y++)
	{
		if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
		src+= stride;
	}
#endif
/*	if(abs(numEq - asmEq) > 0)
	{
		printf("\nasm:%d c:%d\n", asmEq, numEq);
		for(int y=0; y<8; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", temp[x + y*stride]);
			}
			printf("\n");
		}
	}
*/
	return numEq > vFlatnessThreshold;
}
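/*
 * Illustrative only (not used by the filter code): a scalar sketch of the byte
 * trick in the MMX path above. With d = a - b wrapped to a byte (like psubb),
 * (d + 0x7E) compared as a signed byte against 0x7C is true exactly for
 * d in {-1, 0, +1}, so pcmpgtb can count "equal enough" neighbours in parallel.
 */
#if 0
static inline bool byteDiffIsSmall(uint8_t a, uint8_t b)
{
	uint8_t d= a - b;		// wraps mod 256, like psubb
	int8_t t= (int8_t)(d + 0x7E);	// like paddb b7E
	return t > 0x7C;		// like pcmpgtb b7C
}
#endif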
static inline bool isVertMinMaxOk(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
	int isOk;
	asm volatile(
//		"int $3 \n\t"
		"movq (%1, %2), %%mm0 \n\t"
		"movq (%1, %2, 8), %%mm1 \n\t"
		"movq %%mm0, %%mm2 \n\t"
		"psubusb %%mm1, %%mm0 \n\t"
		"psubusb %%mm2, %%mm1 \n\t"
		"por %%mm1, %%mm0 \n\t" // ABS Diff
		"movq pQPb, %%mm7 \n\t" // QP,..., QP
		"paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
		"psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
		"pcmpeqd b00, %%mm0 \n\t"
		"psrlq $16, %%mm0 \n\t"
		"pcmpeqd bFF, %%mm0 \n\t"
//		"movd %%mm0, (%1, %2, 4)\n\t"
		"movd %%mm0, %0 \n\t"
		: "=r" (isOk)
		: "r" (src), "r" (stride)
	);
	return isOk;
#else
	int isOk2= true;
	for(int x=0; x<BLOCK_SIZE; x++)
	{
		if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=false;
	}
/*	if(isOk && !isOk2 || !isOk && isOk2)
	{
		printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
		for(int y=0; y<9; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", src[x + y*stride]);
			}
			printf("\n");
		}
	} */
	return isOk2;
#endif
}
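/*
 * Note: both paths above only compare line 1 against line 8; for a block that
 * has already passed the flatness test this bounds the total luminance range.
 * If that range still exceeds 2*QP the "flat" block is really a slow ramp or
 * an edge, and the vertical low pass below would visibly smear it.
 */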
/**
 * Do a vertical low pass filter on the 8x10 block (only write to the 8x8 block in the middle)
 * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16
 */
static inline void doVertLowPass(uint8_t *src, int stride, int QP)
{
//	QP= 64;
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
//#ifdef HAVE_MMX2
	asm volatile( //"movv %0 %1 %2\n\t"
		"pushl %0 \n\t"
		"movq pQPb, %%mm0 \n\t" // QP,..., QP
//		"movq bFF , %%mm0 \n\t" // QP,..., QP
		"movq (%0), %%mm6 \n\t"
		"movq (%0, %1), %%mm5 \n\t"
		"movq %%mm5, %%mm1 \n\t"
		"movq %%mm6, %%mm2 \n\t"
		"psubusb %%mm6, %%mm5 \n\t"
		"psubusb %%mm1, %%mm2 \n\t"
		"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
		"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
		"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
		"pand %%mm2, %%mm6 \n\t"
		"pandn %%mm1, %%mm2 \n\t"
		"por %%mm2, %%mm6 \n\t" // First Line to Filter
		"movq (%0, %1, 8), %%mm5 \n\t"
		"leal (%0, %1, 4), %%eax \n\t"
		"leal (%0, %1, 8), %%ebx \n\t"
		"subl %1, %%ebx \n\t"
		"addl %1, %0 \n\t" // %0 points to line 1 not 0
		"movq (%0, %1, 8), %%mm7 \n\t"
		"movq %%mm5, %%mm1 \n\t"
		"movq %%mm7, %%mm2 \n\t"
		"psubusb %%mm7, %%mm5 \n\t"
		"psubusb %%mm1, %%mm2 \n\t"
		"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
		"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
		"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
		"pand %%mm2, %%mm7 \n\t"
		"pandn %%mm1, %%mm2 \n\t"
		"por %%mm2, %%mm7 \n\t" // Last Line to Filter

//	1	2	3	4	5	6	7	8
//	%0	%0+%1	%0+2%1	eax	%0+4%1	eax+2%1	ebx	eax+4%1
//	6 4 2 2 1 1
//	6 4 4 2
//	6 8 2
/*
		"movq %%mm6, %%mm2 \n\t" //1
		"movq %%mm6, %%mm3 \n\t" //1
		"paddusb b02, %%mm3 \n\t"
		"psrlw $2, %%mm3 \n\t" //1	/4
		"pand b3F, %%mm3 \n\t"
		"psubb %%mm3, %%mm2 \n\t"
		"movq (%0, %1), %%mm0 \n\t" // 1
		"movq %%mm0, %%mm1 \n\t" // 1
		"paddusb b02, %%mm0 \n\t"
		"psrlw $2, %%mm0 \n\t" // 1	/4
		"pand b3F, %%mm0 \n\t"
		"paddusb %%mm2, %%mm0 \n\t" //3 1	/4
*/
		"movq (%0, %1), %%mm0 \n\t" // 1
		"movq %%mm0, %%mm1 \n\t" // 1
		PAVGB(%%mm6, %%mm0) //1 1	/2
		PAVGB(%%mm6, %%mm0) //3 1	/4
		"movq (%0, %1, 4), %%mm2 \n\t" // 1
		"movq %%mm2, %%mm5 \n\t" // 1
		PAVGB((%%eax), %%mm2) // 11	/2
		PAVGB((%0, %1, 2), %%mm2) // 211	/4
		"movq %%mm2, %%mm3 \n\t" // 211	/4
		"movq (%0), %%mm4 \n\t" // 1
		PAVGB(%%mm4, %%mm3) // 4 211	/8
		PAVGB(%%mm0, %%mm3) //642211	/16
		"movq %%mm3, (%0) \n\t" // X
		// mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
		"movq %%mm1, %%mm0 \n\t" // 1
		PAVGB(%%mm6, %%mm0) //1 1	/2
		"movq %%mm4, %%mm3 \n\t" // 1
		PAVGB((%0,%1,2), %%mm3) // 1 1	/2
		PAVGB((%%eax,%1,2), %%mm5) // 11	/2
		PAVGB((%%eax), %%mm5) // 211	/4
		PAVGB(%%mm5, %%mm3) // 2 2211	/8
		PAVGB(%%mm0, %%mm3) //4242211	/16
		"movq %%mm3, (%0,%1) \n\t" // X
		// mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
		PAVGB(%%mm4, %%mm6) //11	/2
		"movq (%%ebx), %%mm0 \n\t" // 1
		PAVGB((%%eax, %1, 2), %%mm0) // 11/2
		"movq %%mm0, %%mm3 \n\t" // 11/2
		PAVGB(%%mm1, %%mm0) // 2 11/4
		PAVGB(%%mm6, %%mm0) //222 11/8
		PAVGB(%%mm2, %%mm0) //22242211/16
		"movq (%0, %1, 2), %%mm2 \n\t" // 1
		"movq %%mm0, (%0, %1, 2) \n\t" // X
		// mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
		"movq (%%eax, %1, 4), %%mm0 \n\t" // 1
		PAVGB((%%ebx), %%mm0) // 11	/2
		PAVGB(%%mm0, %%mm6) //11 11	/4
		PAVGB(%%mm1, %%mm4) // 11	/2
		PAVGB(%%mm2, %%mm1) // 11	/2
		PAVGB(%%mm1, %%mm6) //1122 11	/8
		PAVGB(%%mm5, %%mm6) //112242211	/16
		"movq (%%eax), %%mm5 \n\t" // 1
		"movq %%mm6, (%%eax) \n\t" // X
		// mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
		"movq (%%eax, %1, 4), %%mm6 \n\t" // 1
		PAVGB(%%mm7, %%mm6) // 11	/2
		PAVGB(%%mm4, %%mm6) // 11 11	/4
		PAVGB(%%mm3, %%mm6) // 11 2211	/8
		PAVGB(%%mm5, %%mm2) // 11	/2
		"movq (%0, %1, 4), %%mm4 \n\t" // 1
		PAVGB(%%mm4, %%mm2) // 112	/4
		PAVGB(%%mm2, %%mm6) // 112242211	/16
		"movq %%mm6, (%0, %1, 4) \n\t" // X
		// mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
		PAVGB(%%mm7, %%mm1) // 11 2	/4
		PAVGB(%%mm4, %%mm5) // 11	/2
		PAVGB(%%mm5, %%mm0) // 11 11	/4
		"movq (%%eax, %1, 2), %%mm6 \n\t" // 1
		PAVGB(%%mm6, %%mm1) // 11 4 2	/8
		PAVGB(%%mm0, %%mm1) // 11224222	/16
//		"pxor %%mm1, %%mm1 \n\t"
		"movq %%mm1, (%%eax, %1, 2) \n\t" // X
		// mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
		PAVGB((%%ebx), %%mm2) // 112 4	/8
		"movq (%%eax, %1, 4), %%mm0 \n\t" // 1
		PAVGB(%%mm0, %%mm6) // 1 1	/2
		PAVGB(%%mm7, %%mm6) // 1 12	/4
		PAVGB(%%mm2, %%mm6) // 1122424	/4
//		"pxor %%mm6, %%mm6 \n\t"
		"movq %%mm6, (%%ebx) \n\t" // X
		// mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
		PAVGB(%%mm7, %%mm5) // 11 2	/4
		PAVGB(%%mm7, %%mm5) // 11 6	/8
		PAVGB(%%mm3, %%mm0) // 112	/4
		PAVGB(%%mm0, %%mm5) // 112246	/16
//		"pxor %%mm5, %%mm5 \n\t"
//		"movq pQPb, %%mm5 \n\t"
		"movq %%mm5, (%%eax, %1, 4) \n\t" // X
		"popl %0 \n\t"
		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;

	for(int x=0; x<BLOCK_SIZE; x++)
	{
		const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
		const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];

		int sums[9];
		sums[0] = first + src[l1];
		sums[1] = src[l1] + src[l2];
		sums[2] = src[l2] + src[l3];
		sums[3] = src[l3] + src[l4];
		sums[4] = src[l4] + src[l5];
		sums[5] = src[l5] + src[l6];
		sums[6] = src[l6] + src[l7];
		sums[7] = src[l7] + src[l8];
		sums[8] = src[l8] + last;

		src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
		src[l2]= ((src[l2]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
		src[l3]= ((src[l3]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
		src[l4]= ((src[l4]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
		src[l5]= ((src[l5]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
		src[l6]= ((src[l6]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
		src[l7]= (((last + src[l7])<<2) + ((src[l8] + sums[5])<<1) + sums[3] + 8)>>4;
		src[l8]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;

		src++;
	}
#endif
}
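/*
 * Worked example of the C path above (illustrative): with sums[0]=first+l1,
 * sums[2]=l2+l3, sums[5]=l5+l6 and sums[7]=l7+l8, the expression for line 4
 * expands to
 *	src[l4]= ( first + l1 + 2*l2 + 2*l3 + 4*l4 + 2*l5 + 2*l6 + l7 + l8 + 8 ) / 16
 * which is exactly the 9-tap (1,1,2,2,4,2,2,1,1)/16 kernel centred on line 4,
 * with first/last replicating the edge when the outer neighbour differs by
 * more than QP.
 */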
/**
 * Experimental implementation of the filter (Algorithm 1) described in a paper from Ramkishor & Karandikar
 * values are correctly clipped (MMX2)
 * values are wraparound (C)
 * conclusion: it's fast, but introduces ugly horizontal patterns if there is a continuous gradient
 *
 * example with l3..l6 = 0 8 16 24 and x = l5 - l4 = 8:
 *	x/2 = 4, x/8 = 1  ->  result 1 12 12 23
 */
static inline void vertRKFilter(uint8_t *src, int stride, int QP)
{
#ifdef HAVE_MMX2
// FIXME rounding
	asm volatile(
		"pxor %%mm7, %%mm7 \n\t" // 0
		"movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		"movq pQPb, %%mm0 \n\t" // QP,..., QP
		"movq %%mm0, %%mm1 \n\t" // QP,..., QP
		"paddusb b02, %%mm0 \n\t"
		"psrlw $2, %%mm0 \n\t"
		"pand b3F, %%mm0 \n\t" // QP/4,..., QP/4
		"paddusb %%mm1, %%mm0 \n\t" // QP*1.25 ...
		"movq (%0, %1, 4), %%mm2 \n\t" // line 4
		"movq (%%ebx), %%mm3 \n\t" // line 5
		"movq %%mm2, %%mm4 \n\t" // line 4
		"pcmpeqb %%mm5, %%mm5 \n\t" // -1
		"pxor %%mm2, %%mm5 \n\t" // -line 4 - 1
		"pavgb %%mm3, %%mm5 \n\t"
		"paddb %%mm6, %%mm5 \n\t" // (l5-l4)/2
		"psubusb %%mm3, %%mm4 \n\t"
		"psubusb %%mm2, %%mm3 \n\t"
		"por %%mm3, %%mm4 \n\t" // |l4 - l5|
		"psubusb %%mm0, %%mm4 \n\t"
		"pcmpeqb %%mm7, %%mm4 \n\t"
		"pand %%mm4, %%mm5 \n\t" // d/2

//		"paddb %%mm6, %%mm2 \n\t" // line 4 + 0x80
		"paddb %%mm5, %%mm2 \n\t"
//		"psubb %%mm6, %%mm2 \n\t"
		"movq %%mm2, (%0,%1, 4) \n\t"

		"movq (%%ebx), %%mm2 \n\t"
//		"paddb %%mm6, %%mm2 \n\t" // line 5 + 0x80
		"psubb %%mm5, %%mm2 \n\t"
//		"psubb %%mm6, %%mm2 \n\t"
		"movq %%mm2, (%%ebx) \n\t"

		"paddb %%mm6, %%mm5 \n\t"
		"psrlw $2, %%mm5 \n\t"
		"pand b3F, %%mm5 \n\t"
		"psubb b20, %%mm5 \n\t" // (l5-l4)/8

		"movq (%%eax, %1, 2), %%mm2 \n\t"
		"paddb %%mm6, %%mm2 \n\t" // line 3 + 0x80
		"paddsb %%mm5, %%mm2 \n\t"
		"psubb %%mm6, %%mm2 \n\t"
		"movq %%mm2, (%%eax, %1, 2) \n\t"

		"movq (%%ebx, %1), %%mm2 \n\t"
		"paddb %%mm6, %%mm2 \n\t" // line 6 + 0x80
		"psubsb %%mm5, %%mm2 \n\t"
		"psubb %%mm6, %%mm2 \n\t"
		"movq %%mm2, (%%ebx, %1) \n\t"
		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;

	for(int x=0; x<BLOCK_SIZE; x++)
	{
		if(ABS(src[l4]-src[l5]) < QP + QP/4)
		{
			int d = src[l5] - src[l4]; // renamed from x to avoid shadowing the loop counter
			src[l3] +=d/8;
			src[l4] +=d/2;
			src[l5] -=d/2;
			src[l6] -=d/8;
		}
		src++;
	}
#endif
}
/**
 * Experimental Filter 1
 */
static inline void vertX1Filter(uint8_t *src, int stride, int QP)
{
#ifdef HAVE_MMX2X
// FIXME
	asm volatile(
		""
		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;

	for(int x=0; x<BLOCK_SIZE; x++)
	{
		int v2= src[l2];
		int v3= src[l3];
		int v4= src[l4];
		int v5= src[l5];
		int v6= src[l6];
		int v7= src[l7];

		if(ABS(v4-v5)<QP && ABS(v4-v5) - (ABS(v3-v4) + ABS(v5-v6))>0 )
		{
			src[l3] = (6*v2 + 4*v3 + 3*v4 + 2*v5 +   v6        )/16;
			src[l4] = (3*v2 + 3*v3 + 4*v4 + 3*v5 + 2*v6 +   v7 )/16;
			src[l5] = (1*v2 + 2*v3 + 3*v4 + 4*v5 + 3*v6 + 3*v7)/16;
			src[l6] = (       1*v3 + 2*v4 + 3*v5 + 4*v6 + 6*v7)/16;
		}
		src++;
	}
#endif
}
static inline void doVertDefFilter(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
	src+= stride;
	//FIXME try pmul for *5 stuff
//	src[0]=0;
	asm volatile(
		"pxor %%mm7, %%mm7 \n\t"
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7
//	%0	%0+%1	%0+2%1	eax+2%1	%0+4%1	eax+4%1	ebx+%1	ebx+2%1
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1

		"movq (%0), %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
		"punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0

		"movq (%%eax), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
		"punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1

		"movq (%%eax, %1), %%mm4 \n\t"
		"movq %%mm4, %%mm5 \n\t"
		"punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
		"punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2

		"paddw %%mm0, %%mm0 \n\t" // 2L0
		"paddw %%mm1, %%mm1 \n\t" // 2H0
		"psubw %%mm4, %%mm2 \n\t" // L1 - L2
		"psubw %%mm5, %%mm3 \n\t" // H1 - H2
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2

		"psllw $2, %%mm2 \n\t" // 4L1 - 4L2
		"psllw $2, %%mm3 \n\t" // 4H1 - 4H2
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2

		"movq (%%eax, %1, 2), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // L3
		"punpckhbw %%mm7, %%mm3 \n\t" // H3

		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
		"movq %%mm0, temp0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq %%mm1, temp1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3

		"movq (%0, %1, 4), %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"punpcklbw %%mm7, %%mm0 \n\t" // L4
		"punpckhbw %%mm7, %%mm1 \n\t" // H4

		"psubw %%mm0, %%mm2 \n\t" // L3 - L4
		"psubw %%mm1, %%mm3 \n\t" // H3 - H4
		"movq %%mm2, temp2 \n\t" // L3 - L4
		"movq %%mm3, temp3 \n\t" // H3 - H4
		"paddw %%mm4, %%mm4 \n\t" // 2L2
		"paddw %%mm5, %%mm5 \n\t" // 2H2
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4

		"psllw $2, %%mm2 \n\t" // 4L3 - 4L4
		"psllw $2, %%mm3 \n\t" // 4H3 - 4H4
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
//50 opcodes so far
		"movq (%%ebx), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // L5
		"punpckhbw %%mm7, %%mm3 \n\t" // H5
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5

		"movq (%%ebx, %1), %%mm6 \n\t"
		"punpcklbw %%mm7, %%mm6 \n\t" // L6
		"psubw %%mm6, %%mm2 \n\t" // L5 - L6
		"movq (%%ebx, %1), %%mm6 \n\t"
		"punpckhbw %%mm7, %%mm6 \n\t" // H6
		"psubw %%mm6, %%mm3 \n\t" // H5 - H6

		"paddw %%mm0, %%mm0 \n\t" // 2L4
		"paddw %%mm1, %%mm1 \n\t" // 2H4
		"psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
		"psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6

		"psllw $2, %%mm2 \n\t" // 4L5 - 4L6
		"psllw $2, %%mm3 \n\t" // 4H5 - 4H6
		"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
		"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6

		"movq (%%ebx, %1, 2), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // L7
		"punpckhbw %%mm7, %%mm3 \n\t" // H7

		"paddw %%mm2, %%mm2 \n\t" // 2L7
		"paddw %%mm3, %%mm3 \n\t" // 2H7
		"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
		"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7

		"movq temp0, %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq temp1, %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
//FIXME pxor, psubw, pmax for abs
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm0, %%mm6 \n\t"
		"pxor %%mm6, %%mm0 \n\t"
		"psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm1, %%mm6 \n\t"
		"pxor %%mm6, %%mm1 \n\t"
		"psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm2, %%mm6 \n\t"
		"pxor %%mm6, %%mm2 \n\t"
		"psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm3, %%mm6 \n\t"
		"pxor %%mm6, %%mm3 \n\t"
		"psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#ifdef HAVE_MMX2
		"pminsw %%mm2, %%mm0 \n\t"
		"pminsw %%mm3, %%mm1 \n\t"
#else
		"movq %%mm0, %%mm6 \n\t"
		"psubusw %%mm2, %%mm6 \n\t"
		"psubw %%mm6, %%mm0 \n\t"
		"movq %%mm1, %%mm6 \n\t"
		"psubusw %%mm3, %%mm6 \n\t"
		"psubw %%mm6, %%mm1 \n\t"
#endif
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
		"pxor %%mm6, %%mm4 \n\t"
		"psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
		"pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
		"pxor %%mm7, %%mm5 \n\t"
		"psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes
		"movd %2, %%mm2 \n\t" // QP
//		"pcmpeqb %%mm2, %%mm2 \n\t"
		"punpcklwd %%mm2, %%mm2 \n\t"
		"punpcklwd %%mm2, %%mm2 \n\t"
		"psllw $3, %%mm2 \n\t" // 8QP
		"movq %%mm2, %%mm3 \n\t" // 8QP
		"pcmpgtw %%mm4, %%mm2 \n\t"
		"pcmpgtw %%mm5, %%mm3 \n\t"
		"pand %%mm2, %%mm4 \n\t"
		"pand %%mm3, %%mm5 \n\t"

		"psubusw %%mm0, %%mm4 \n\t" // hd
		"psubusw %%mm1, %%mm5 \n\t" // ld

		"movq w05, %%mm2 \n\t" // 5
		"pmullw %%mm2, %%mm4 \n\t"
		"pmullw %%mm2, %%mm5 \n\t"
		"movq w20, %%mm2 \n\t" // 32
		"paddw %%mm2, %%mm4 \n\t"
		"paddw %%mm2, %%mm5 \n\t"
		"psrlw $6, %%mm4 \n\t"
		"psrlw $6, %%mm5 \n\t"
/*
		"movq w06, %%mm2 \n\t" // 6
		"paddw %%mm2, %%mm4 \n\t"
		"paddw %%mm2, %%mm5 \n\t"
		"movq w1400, %%mm2 \n\t" // 1400h = 5120 = 5/64*2^16
//FIXME if *5/64 is supposed to be /13 then we should use 5041 instead of 5120
		"pmulhw %%mm2, %%mm4 \n\t" // hd/13
		"pmulhw %%mm2, %%mm5 \n\t" // ld/13
*/
		"movq temp2, %%mm0 \n\t" // L3 - L4
		"movq temp3, %%mm1 \n\t" // H3 - H4

		"pxor %%mm2, %%mm2 \n\t"
		"pxor %%mm3, %%mm3 \n\t"

// FIXME rounding error
		"psraw $1, %%mm0 \n\t" // (L3 - L4)/2
		"psraw $1, %%mm1 \n\t" // (H3 - H4)/2
		"pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
		"pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
		"pxor %%mm2, %%mm0 \n\t"
		"pxor %%mm3, %%mm1 \n\t"
		"psubw %%mm2, %%mm0 \n\t" // |L3-L4|
		"psubw %%mm3, %%mm1 \n\t" // |H3-H4|
//		"psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
//		"psrlw $1, %%mm1 \n\t" // |H3 - H4|/2

		"pxor %%mm6, %%mm2 \n\t"
		"pxor %%mm7, %%mm3 \n\t"
		"pand %%mm2, %%mm4 \n\t"
		"pand %%mm3, %%mm5 \n\t"

#ifdef HAVE_MMX2
		"pminsw %%mm0, %%mm4 \n\t"
		"pminsw %%mm1, %%mm5 \n\t"
#else
		"movq %%mm4, %%mm2 \n\t"
		"psubusw %%mm0, %%mm2 \n\t"
		"psubw %%mm2, %%mm4 \n\t"
		"movq %%mm5, %%mm2 \n\t"
		"psubusw %%mm1, %%mm2 \n\t"
		"psubw %%mm2, %%mm5 \n\t"
#endif
		"pxor %%mm6, %%mm4 \n\t"
		"pxor %%mm7, %%mm5 \n\t"
		"psubw %%mm6, %%mm4 \n\t"
		"psubw %%mm7, %%mm5 \n\t"
		"packsswb %%mm5, %%mm4 \n\t"
		"movq (%%eax, %1, 2), %%mm0 \n\t"
		"paddb %%mm4, %%mm0 \n\t"
		"movq %%mm0, (%%eax, %1, 2) \n\t"
		"movq (%0, %1, 4), %%mm0 \n\t"
		"psubb %%mm4, %%mm0 \n\t"
//		"pxor %%mm0, %%mm0 \n\t"
		"movq %%mm0, (%0, %1, 4) \n\t"
		:
		: "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
//	const int l9= stride + l8;

	for(int x=0; x<BLOCK_SIZE; x++)
	{
		const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
		if(ABS(middleEnergy) < 8*QP)
		{
			const int q=(src[l4] - src[l5])/2;
			const int leftEnergy=  5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
			const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);

			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
			d= MAX(d, 0);

			d= (5*d + 32) >> 6;
			d*= SIGN(-middleEnergy);

			if(q>0)
			{
				d= d<0 ? 0 : d;
				d= d>q ? q : d;
			}
			else
			{
				d= d>0 ? 0 : d;
				d= d<q ? q : d;
			}

			src[l4]-= d;
			src[l5]+= d;
		}
		src++;
	}
#endif
}
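/*
 * Worked numeric example of the C path (illustrative): a step edge
 * l1..l8 = 0,0,0,0,16,16,16,16 with QP=8 gives
 *	middleEnergy = 5*(16-0) + 2*(0-16) = 48,  |48| < 8*QP = 64
 *	q = (0-16)/2 = -8,  leftEnergy = rightEnergy = 0
 *	d = (5*48 + 32)>>6 = 4,  d *= SIGN(-48)  ->  d = -4 (within [q, 0])
 * so l4 becomes 4 and l5 becomes 12: the step is softened but, thanks to the
 * clip against q, never inverted.
 */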
//FIXME? |255-0| = 1
/**
 * Check if the given 8x8 Block is mostly "flat" and copy the unaligned data into tempBlock.
 */
static inline bool isHorizDCAndCopy2Temp(uint8_t src[], int stride)
{
//	src++;
	int numEq= 0;
#ifdef HAVE_MMX
	asm volatile (
//		"int $3 \n\t"
		"pushl %1 \n\t"
		"movq b7E, %%mm7 \n\t" // mm7 = 0x7E
		"movq b7C, %%mm6 \n\t" // mm6 = 0x7C
		"leal tempBlock, %%eax \n\t"
		"pxor %%mm0, %%mm0 \n\t"

#define HDC_CHECK_AND_CPY(i) \
		"movq -4(%1), %%mm2 \n\t"\
		"psrlq $32, %%mm2 \n\t"\
		"punpckldq 4(%1), %%mm2 \n\t" /* (%1) */\
		"movq %%mm2, %%mm1 \n\t"\
		"psrlq $8, %%mm2 \n\t"\
		"psubb %%mm1, %%mm2 \n\t"\
		"paddb %%mm7, %%mm2 \n\t"\
		"pcmpgtb %%mm6, %%mm2 \n\t"\
		"paddb %%mm2, %%mm0 \n\t"\
		"movq %%mm1," #i "(%%eax) \n\t"

		HDC_CHECK_AND_CPY(0)
		"addl %2, %1 \n\t"
		HDC_CHECK_AND_CPY(8)
		"addl %2, %1 \n\t"
		HDC_CHECK_AND_CPY(16)
		"addl %2, %1 \n\t"
		HDC_CHECK_AND_CPY(24)
		"addl %2, %1 \n\t"
		HDC_CHECK_AND_CPY(32)
		"addl %2, %1 \n\t"
		HDC_CHECK_AND_CPY(40)
		"addl %2, %1 \n\t"
		HDC_CHECK_AND_CPY(48)
		"addl %2, %1 \n\t"
		HDC_CHECK_AND_CPY(56)

		"psllq $8, %%mm0 \n\t" // remove dummy value
		"movq %%mm0, %%mm1 \n\t"
		"psrlw $8, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $16, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $32, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"popl %1 \n\t"
		"movd %%mm0, %0 \n\t"
		: "=r" (numEq)
		: "r" (src), "r" (stride)
		: "%eax"
	);
//	printf("%d\n", numEq);
	numEq= (256 - (numEq & 0xFF)) &0xFF;
#else
	for(int y=0; y<BLOCK_SIZE; y++)
	{
		if(((src[0] - src[1] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[1] - src[2] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[2] - src[3] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[3] - src[4] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[4] - src[5] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[5] - src[6] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[6] - src[7] + 1) & 0xFFFF) < 3) numEq++;
		tempBlock[0 + y*TEMP_STRIDE] = src[0];
		tempBlock[1 + y*TEMP_STRIDE] = src[1];
		tempBlock[2 + y*TEMP_STRIDE] = src[2];
		tempBlock[3 + y*TEMP_STRIDE] = src[3];
		tempBlock[4 + y*TEMP_STRIDE] = src[4];
		tempBlock[5 + y*TEMP_STRIDE] = src[5];
		tempBlock[6 + y*TEMP_STRIDE] = src[6];
		tempBlock[7 + y*TEMP_STRIDE] = src[7];
		src+= stride;
	}
#endif
/*	if(abs(numEq - asmEq) > 0)
	{
//		printf("\nasm:%d c:%d\n", asmEq, numEq);
		for(int y=0; y<8; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", src[x + y*stride]);
			}
			printf("\n");
		}
	}
*/
//	printf("%d\n", numEq);
	return numEq > hFlatnessThreshold;
}
static inline bool isHorizMinMaxOk(uint8_t src[], int stride, int QP)
{
#ifdef MMX_FIXME
FIXME
	int isOk;
	asm volatile(
//		"int $3 \n\t"
		"movq (%1, %2), %%mm0 \n\t"
		"movq (%1, %2, 8), %%mm1 \n\t"
		"movq %%mm0, %%mm2 \n\t"
		"psubusb %%mm1, %%mm0 \n\t"
		"psubusb %%mm2, %%mm1 \n\t"
		"por %%mm1, %%mm0 \n\t" // ABS Diff
		"movq pQPb, %%mm7 \n\t" // QP,..., QP
		"paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
		"psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
		"pcmpeqd b00, %%mm0 \n\t"
		"psrlq $16, %%mm0 \n\t"
		"pcmpeqd bFF, %%mm0 \n\t"
//		"movd %%mm0, (%1, %2, 4)\n\t"
		"movd %%mm0, %0 \n\t"
		: "=r" (isOk)
		: "r" (src), "r" (stride)
	);
	return isOk;
#else
	if(abs(src[0] - src[7]) > 2*QP) return false;

	return true;
#endif
}
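/*
 * Note: the C path above is deliberately a cheap approximation (the 'a'
 * rating in the table at the top of the file): it only compares the two
 * outermost pixels of one row instead of doing a full per-column check like
 * the vertical variant.
 */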
static inline void doHorizDefFilterAndCopyBack(uint8_t dst[], int stride, int QP)
{
#ifdef HAVE_MMX
	asm volatile(
		"pushl %0 \n\t"
		"pxor %%mm7, %%mm7 \n\t"
		"movq bm00001000, %%mm6 \n\t"
		"movd %2, %%mm5 \n\t" // QP
		"movq %%mm5, %%mm4 \n\t"
		"paddusb %%mm5, %%mm5 \n\t" // 2QP
		"paddusb %%mm5, %%mm4 \n\t" // 3QP
		"psllq $24, %%mm4 \n\t"
		"pxor %%mm5, %%mm5 \n\t" // 0
		"psubb %%mm4, %%mm5 \n\t" // -QP
		"leal tempBlock, %%eax \n\t"

//FIXME? "unroll by 2" and mix
#ifdef HAVE_MMX2
#define HDF(i) \
		"movq " #i "(%%eax), %%mm0 \n\t"\
		"movq %%mm0, %%mm1 \n\t"\
		"movq %%mm0, %%mm2 \n\t"\
		"psrlq $8, %%mm1 \n\t"\
		"psubusb %%mm1, %%mm2 \n\t"\
		"psubusb %%mm0, %%mm1 \n\t"\
		"por %%mm2, %%mm1 \n\t" /* p'x = |px - p(x+1)| */\
		"pcmpeqb %%mm7, %%mm2 \n\t" /* p'x = sgn[px - p(x+1)] */\
		"pshufw $0x00, %%mm1, %%mm3 \n\t" /* p'5 = |p1 - p2| */\
		"pminub %%mm1, %%mm3 \n\t" /* p'5 = min(|p2-p1|, |p6-p5|)*/\
		"psrlq $16, %%mm3 \n\t" /* p'3 = min(|p2-p1|, |p6-p5|)*/\
		"psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5-p6|) */\
		"paddb %%mm5, %%mm1 \n\t"\
		"psubusb %%mm5, %%mm1 \n\t"\
		"psrlw $2, %%mm1 \n\t"\
		"pxor %%mm2, %%mm1 \n\t"\
		"psubb %%mm2, %%mm1 \n\t"\
		"pand %%mm6, %%mm1 \n\t"\
		"psubb %%mm1, %%mm0 \n\t"\
		"psllq $8, %%mm1 \n\t"\
		"paddb %%mm1, %%mm0 \n\t"\
		"movd %%mm0, (%0) \n\t"\
		"psrlq $32, %%mm0 \n\t"\
		"movd %%mm0, 4(%0) \n\t"
#else
#define HDF(i)\
		"movq " #i "(%%eax), %%mm0 \n\t"\
		"movq %%mm0, %%mm1 \n\t"\
		"movq %%mm0, %%mm2 \n\t"\
		"psrlq $8, %%mm1 \n\t"\
		"psubusb %%mm1, %%mm2 \n\t"\
		"psubusb %%mm0, %%mm1 \n\t"\
		"por %%mm2, %%mm1 \n\t" /* p'x = |px - p(x+1)| */\
		"pcmpeqb %%mm7, %%mm2 \n\t" /* p'x = sgn[px - p(x+1)] */\
		"movq %%mm1, %%mm3 \n\t"\
		"psllq $32, %%mm3 \n\t"\
		"movq %%mm3, %%mm4 \n\t"\
		"psubusb %%mm1, %%mm4 \n\t"\
		"psubb %%mm4, %%mm3 \n\t"\
		"psrlq $16, %%mm3 \n\t" /* p'3 = min(|p2-p1|, |p6-p5|)*/\
		"psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5-p6|) */\
		"paddb %%mm5, %%mm1 \n\t"\
		"psubusb %%mm5, %%mm1 \n\t"\
		"psrlw $2, %%mm1 \n\t"\
		"pxor %%mm2, %%mm1 \n\t"\
		"psubb %%mm2, %%mm1 \n\t"\
		"pand %%mm6, %%mm1 \n\t"\
		"psubb %%mm1, %%mm0 \n\t"\
		"psllq $8, %%mm1 \n\t"\
		"paddb %%mm1, %%mm0 \n\t"\
		"movd %%mm0, (%0) \n\t"\
		"psrlq $32, %%mm0 \n\t"\
		"movd %%mm0, 4(%0) \n\t"
#endif
		HDF(0)
		"addl %1, %0 \n\t"
		HDF(8)
		"addl %1, %0 \n\t"
		HDF(16)
		"addl %1, %0 \n\t"
		HDF(24)
		"addl %1, %0 \n\t"
		HDF(32)
		"addl %1, %0 \n\t"
		HDF(40)
		"addl %1, %0 \n\t"
		HDF(48)
		"addl %1, %0 \n\t"
		HDF(56)
		"popl %0 \n\t"
		:
		: "r" (dst), "r" (stride), "r" (QP)
		: "%eax"
	);
#else
	uint8_t *src= tempBlock;

	for(int y=0; y<BLOCK_SIZE; y++)
	{
		dst[0] = src[0];
		dst[1] = src[1];
		dst[2] = src[2];
		dst[3] = src[3];
		dst[4] = src[4];
		dst[5] = src[5];
		dst[6] = src[6];
		dst[7] = src[7];

		const int middleEnergy= 5*(src[4] - src[3]) + 2*(src[2] - src[5]);
		if(ABS(middleEnergy) < 8*QP)
		{
			const int q=(src[3] - src[4])/2;
			const int leftEnergy=  5*(src[2] - src[1]) + 2*(src[0] - src[3]);
			const int rightEnergy= 5*(src[6] - src[5]) + 2*(src[4] - src[7]);

			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
			d= MAX(d, 0);

			d= (5*d + 32) >> 6;
			d*= SIGN(-middleEnergy);

			if(q>0)
			{
				d= d<0 ? 0 : d;
				d= d>q ? q : d;
			}
			else
			{
				d= d>0 ? 0 : d;
				d= d<q ? q : d;
			}

			dst[3]-= d;
			dst[4]+= d;
		}
		dst+= stride;
		src+= TEMP_STRIDE;
	}
#endif
}
/**
 * Do a horizontal low pass filter on the 8x8 block
 * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
 * using approximately the 7-Tap Filter (1,2,3,4,3,2,1)/16 (MMX2/3DNOW version)
 */
static inline void doHorizLowPassAndCopyBack(uint8_t dst[], int stride, int QP)
{
//return;
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	asm volatile( //"movv %0 %1 %2\n\t"
		"pushl %0 \n\t"
		"pxor %%mm7, %%mm7 \n\t"
		"leal tempBlock, %%eax \n\t"
/*
#define HLP1	"movq (%0), %%mm0 \n\t"\
		"movq %%mm0, %%mm1 \n\t"\
		"psllq $8, %%mm0 \n\t"\
		PAVGB(%%mm1, %%mm0)\
		"psrlw $8, %%mm0 \n\t"\
		"pxor %%mm1, %%mm1 \n\t"\
		"packuswb %%mm1, %%mm0 \n\t"\
		"movq %%mm0, %%mm1 \n\t"\
		"movq %%mm0, %%mm2 \n\t"\
		"psllq $32, %%mm0 \n\t"\
		"paddb %%mm0, %%mm1 \n\t"\
		"psllq $16, %%mm2 \n\t"\
		PAVGB(%%mm2, %%mm0)\
		"movq %%mm0, %%mm3 \n\t"\
		"pand bm11001100, %%mm0 \n\t"\
		"paddusb %%mm0, %%mm3 \n\t"\
		"psrlq $8, %%mm3 \n\t"\
		PAVGB(%%mm1, %%mm4)\
		PAVGB(%%mm3, %%mm2)\
		"psrlq $16, %%mm2 \n\t"\
		"punpcklbw %%mm2, %%mm2 \n\t"\
		"movq %%mm2, (%0) \n\t"\

#define HLP2	"movq (%0), %%mm0 \n\t"\
		"movq %%mm0, %%mm1 \n\t"\
		"psllq $8, %%mm0 \n\t"\
		PAVGB(%%mm1, %%mm0)\
		"psrlw $8, %%mm0 \n\t"\
		"pxor %%mm1, %%mm1 \n\t"\
		"packuswb %%mm1, %%mm0 \n\t"\
		"movq %%mm0, %%mm2 \n\t"\
		"psllq $32, %%mm0 \n\t"\
		"psllq $16, %%mm2 \n\t"\
		PAVGB(%%mm2, %%mm0)\
		"movq %%mm0, %%mm3 \n\t"\
		"pand bm11001100, %%mm0 \n\t"\
		"paddusb %%mm0, %%mm3 \n\t"\
		"psrlq $8, %%mm3 \n\t"\
		PAVGB(%%mm3, %%mm2)\
		"psrlq $16, %%mm2 \n\t"\
		"punpcklbw %%mm2, %%mm2 \n\t"\
		"movq %%mm2, (%0) \n\t"\
*/
// approximately a 7-Tap Filter with Vector (1,2,3,4,3,2,1)/16
/*
	31
	121
	 121
	  121
	   121
	    121
	     121
	      13
Implemented	Exact 7-Tap
 9421		A321
 36421		64321
 334321		=
 1234321	=
 1234321	=
 123433		=
 12463		12346
  1249		 123A
*/
#ifdef HAVE_MMX2
#define HLP3(i)	"movq " #i "(%%eax), %%mm0 \n\t"\
		"movq %%mm0, %%mm1 \n\t"\
		"movq %%mm0, %%mm2 \n\t"\
		"movq %%mm0, %%mm3 \n\t"\
		"movq %%mm0, %%mm4 \n\t"\
		"psllq $8, %%mm1 \n\t"\
		"psrlq $8, %%mm2 \n\t"\
		"pand bm00000001, %%mm3 \n\t"\
		"pand bm10000000, %%mm4 \n\t"\
		"por %%mm3, %%mm1 \n\t"\
		"por %%mm4, %%mm2 \n\t"\
		PAVGB(%%mm2, %%mm1)\
		PAVGB(%%mm1, %%mm0)\
\
		"pshufw $0xF9, %%mm0, %%mm3 \n\t"\
		"pshufw $0x90, %%mm0, %%mm4 \n\t"\
		PAVGB(%%mm3, %%mm4)\
		PAVGB(%%mm4, %%mm0)\
		"movd %%mm0, (%0) \n\t"\
		"psrlq $32, %%mm0 \n\t"\
		"movd %%mm0, 4(%0) \n\t"
#else
#define HLP3(i)	"movq " #i "(%%eax), %%mm0 \n\t"\
		"movq %%mm0, %%mm1 \n\t"\
		"movq %%mm0, %%mm2 \n\t"\
		"movq %%mm0, %%mm3 \n\t"\
		"movq %%mm0, %%mm4 \n\t"\
		"psllq $8, %%mm1 \n\t"\
		"psrlq $8, %%mm2 \n\t"\
		"pand bm00000001, %%mm3 \n\t"\
		"pand bm10000000, %%mm4 \n\t"\
		"por %%mm3, %%mm1 \n\t"\
		"por %%mm4, %%mm2 \n\t"\
		PAVGB(%%mm2, %%mm1)\
		PAVGB(%%mm1, %%mm0)\
\
		"movq %%mm0, %%mm3 \n\t"\
		"movq %%mm0, %%mm4 \n\t"\
		"movq %%mm0, %%mm5 \n\t"\
		"psrlq $16, %%mm3 \n\t"\
		"psllq $16, %%mm4 \n\t"\
		"pand bm11000000, %%mm5 \n\t"\
		"por %%mm5, %%mm3 \n\t"\
		"movq %%mm0, %%mm5 \n\t"\
		"pand bm00000011, %%mm5 \n\t"\
		"por %%mm5, %%mm4 \n\t"\
		PAVGB(%%mm3, %%mm4)\
		PAVGB(%%mm4, %%mm0)\
		"movd %%mm0, (%0) \n\t"\
		"psrlq $32, %%mm0 \n\t"\
		"movd %%mm0, 4(%0) \n\t"
#endif
#define HLP(i) HLP3(i)
		HLP(0)
		"addl %1, %0 \n\t"
		HLP(8)
		"addl %1, %0 \n\t"
		HLP(16)
		"addl %1, %0 \n\t"
		HLP(24)
		"addl %1, %0 \n\t"
		HLP(32)
		"addl %1, %0 \n\t"
		HLP(40)
		"addl %1, %0 \n\t"
		HLP(48)
		"addl %1, %0 \n\t"
		HLP(56)
		"popl %0 \n\t"
		:
		: "r" (dst), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	uint8_t *temp= tempBlock;

	for(int y=0; y<BLOCK_SIZE; y++)
	{
		const int first= ABS(dst[-1] - dst[0]) < QP ? dst[-1] : dst[0];
		const int last= ABS(dst[8] - dst[7]) < QP ? dst[8] : dst[7];

		int sums[9];
		sums[0] = first + temp[0];
		sums[1] = temp[0] + temp[1];
		sums[2] = temp[1] + temp[2];
		sums[3] = temp[2] + temp[3];
		sums[4] = temp[3] + temp[4];
		sums[5] = temp[4] + temp[5];
		sums[6] = temp[5] + temp[6];
		sums[7] = temp[6] + temp[7];
		sums[8] = temp[7] + last;

		dst[0]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
		dst[1]= ((dst[1]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
		dst[2]= ((dst[2]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
		dst[3]= ((dst[3]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
		dst[4]= ((dst[4]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
		dst[5]= ((dst[5]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
		dst[6]= (((last + dst[6])<<2) + ((dst[7] + sums[5])<<1) + sums[3] + 8)>>4;
		dst[7]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;

		dst+= stride;
		temp+= TEMP_STRIDE;
	}
#endif
}
static inline void dering(uint8_t src[], int stride, int QP)
{
//FIXME
#ifdef HAVE_MMX2X
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		"pcmpeqb %%mm6, %%mm6 \n\t"
		"pxor %%mm7, %%mm7 \n\t"

#define FIND_MIN_MAX(addr)\
		"movq (" #addr "), %%mm0 \n\t"\
		"pminub %%mm0, %%mm6 \n\t"\
		"pmaxub %%mm0, %%mm7 \n\t"

		FIND_MIN_MAX(%0)
		FIND_MIN_MAX(%%eax)
		FIND_MIN_MAX(%%eax, %1)
		FIND_MIN_MAX(%%eax, %1, 2)
		FIND_MIN_MAX(%0, %1, 4)
		FIND_MIN_MAX(%%ebx)
		FIND_MIN_MAX(%%ebx, %1)
		FIND_MIN_MAX(%%ebx, %1, 2)
		FIND_MIN_MAX(%0, %1, 8)
		FIND_MIN_MAX(%%ebx, %1, 4)

		"movq %%mm6, %%mm4 \n\t"
		"psrlq $32, %%mm6 \n\t"
		"pminub %%mm4, %%mm6 \n\t"
		"movq %%mm6, %%mm4 \n\t"
		"psrlq $16, %%mm6 \n\t"
		"pminub %%mm4, %%mm6 \n\t"
		"movq %%mm6, %%mm4 \n\t"
		"psrlq $8, %%mm6 \n\t"
		"pminub %%mm4, %%mm6 \n\t" // min of pixels

		"movq %%mm7, %%mm4 \n\t"
		"psrlq $32, %%mm7 \n\t"
		"pmaxub %%mm4, %%mm7 \n\t"
		"movq %%mm7, %%mm4 \n\t"
		"psrlq $16, %%mm7 \n\t"
		"pmaxub %%mm4, %%mm7 \n\t"
		"movq %%mm7, %%mm4 \n\t"
		"psrlq $8, %%mm7 \n\t"
		"pmaxub %%mm4, %%mm7 \n\t" // max of pixels
		PAVGB(%%mm6, %%mm7) // (max + min)/2

		: : "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
//FIXME
#endif
}
/**
 * ...
 * the mode value is interpreted as a quality value if it's negative; its range is then (-1 ... -63)
 * -63 is best quality, -1 is worst
 */
extern "C"{

void postprocess(unsigned char * src[], int src_stride,
		unsigned char * dst[], int dst_stride,
		int horizontal_size, int vertical_size,
		QP_STORE_T *QP_store, int QP_stride,
		int mode)
{
	if(mode<0) mode= getModeForQuality(-mode);

/*
	long long T= rdtsc();
	for(int y=vertical_size-1; y>=0 ; y--)
		memcpy(dst[0] + y*src_stride, src[0] + y*src_stride, src_stride);
//	memcpy(dst[0], src[0], src_stride*vertical_size);
	printf("%4dk\r", (rdtsc()-T)/1000);

	return;
*/
/*
	long long T= rdtsc();
	while( (rdtsc() - T)/1000 < 4000);

	return;
*/
	postProcess(src[0], src_stride, dst[0], dst_stride,
		horizontal_size, vertical_size, QP_store, QP_stride, false, mode);

	horizontal_size >>= 1;
	vertical_size   >>= 1;
	src_stride      >>= 1;
	dst_stride      >>= 1;

	if(1)
	{
		postProcess(src[1], src_stride, dst[1], dst_stride,
			horizontal_size, vertical_size, QP_store, QP_stride, true, mode >>4);
		postProcess(src[2], src_stride, dst[2], dst_stride,
			horizontal_size, vertical_size, QP_store, QP_stride, true, mode >>4);
	}
	else
	{
		memcpy(dst[1], src[1], src_stride*vertical_size);
		memcpy(dst[2], src[2], src_stride*vertical_size);
	}
}

/**
 * gets the mode flags for a given quality (larger values mean slower but better postprocessing)
 * 0 <= quality < 64
 */
int getModeForQuality(int quality){
	int modes[6]= {
		LUM_V_DEBLOCK,
		LUM_V_DEBLOCK | LUM_H_DEBLOCK,
		LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK,
		LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK,
		LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING,
		LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING | CHROM_DERING
	};

	return modes[ (quality*6) >>6 ];
}
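/*
 * e.g. getModeForQuality(10) -> (10*6)>>6 = 0 -> LUM_V_DEBLOCK only,
 *      getModeForQuality(63) -> (63*6)>>6 = 5 -> all deblocking + dering flags
 */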
} // extern "C"

/**
 * Copies a block from src to dst and fixes the blacklevel
 */
static inline void blockCopy(uint8_t dst[], int dstStride, uint8_t src[], int srcStride)
{
#ifdef HAVE_MMX
	asm volatile(
		"pushl %0 \n\t"
		"pushl %1 \n\t"
		"leal (%2,%2), %%eax \n\t"
		"leal (%3,%3), %%ebx \n\t"
		"movq packedYOffset, %%mm2 \n\t"
		"movq packedYScale, %%mm3 \n\t"

#define SIMPLE_CPY \
		"movq (%0), %%mm0 \n\t"\
		"movq (%0,%2), %%mm1 \n\t"\
		"psubusb %%mm2, %%mm0 \n\t"\
		"psubusb %%mm2, %%mm1 \n\t"\
		"movq %%mm0, (%1) \n\t"\
		"movq %%mm1, (%1, %3) \n\t"

#define SCALED_CPY \
		"movq (%0), %%mm0 \n\t"\
		"movq (%0,%2), %%mm1 \n\t"\
		"psubusb %%mm2, %%mm0 \n\t"\
		"psubusb %%mm2, %%mm1 \n\t"\
		"pxor %%mm4, %%mm4 \n\t"\
		"pxor %%mm5, %%mm5 \n\t"\
		"punpcklbw %%mm0, %%mm4 \n\t"\
		"punpckhbw %%mm0, %%mm5 \n\t"\
		"pmulhuw %%mm3, %%mm4 \n\t"\
		"pmulhuw %%mm3, %%mm5 \n\t"\
		"packuswb %%mm5, %%mm4 \n\t"\
		"movq %%mm4, (%1) \n\t"\
		"pxor %%mm4, %%mm4 \n\t"\
		"pxor %%mm5, %%mm5 \n\t"\
		"punpcklbw %%mm1, %%mm4 \n\t"\
		"punpckhbw %%mm1, %%mm5 \n\t"\
		"pmulhuw %%mm3, %%mm4 \n\t"\
		"pmulhuw %%mm3, %%mm5 \n\t"\
		"packuswb %%mm5, %%mm4 \n\t"\
		"movq %%mm4, (%1, %3) \n\t"

#define CPY SCALED_CPY
//#define CPY SIMPLE_CPY
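//FIXME? pmulhuw is not part of baseline MMX (it needs MMX2/SSE integer ops or
// an Athlon's extended MMX), so SCALED_CPY presumably wants a HAVE_MMX2 /
// HAVE_3DNOW guard rather than plain HAVE_MMX.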
  1435. // "prefetchnta 8(%0)\n\t"
  1436. CPY
  1437. "addl %%eax, %0 \n\t"
  1438. "addl %%ebx, %1 \n\t"
  1439. CPY
  1440. "addl %%eax, %0 \n\t"
  1441. "addl %%ebx, %1 \n\t"
  1442. CPY
  1443. "addl %%eax, %0 \n\t"
  1444. "addl %%ebx, %1 \n\t"
  1445. CPY
  1446. "popl %1 \n\t"
  1447. "popl %0 \n\t"
  1448. : : "r" (src),
  1449. "r" (dst),
  1450. "r" (srcStride),
  1451. "r" (dstStride)
  1452. : "%eax", "%ebx"
  1453. );
  1454. #else
  1455. for(int i=0; i<BLOCK_SIZE; i++) // last 10x8 Block is copied allready so +2
  1456. memcpy( &(dst[dstStride*i]),
  1457. &(src[srcStride*i]), BLOCK_SIZE);
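	// Note: unlike the MMX path, this C fallback only copies; it does not
	// apply the packedYOffset/packedYScale blacklevel correction.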
#endif
}
/**
 * Filters array of bytes (Y or U or V values)
 */
void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
	QP_STORE_T QPs[], int QPStride, bool isColor, int mode)
{
#ifdef TIMEING
	long long T0, T1, memcpyTime=0, vertTime=0, horizTime=0, sumTime, diffTime=0;
	sumTime= rdtsc();
#endif

	/* we need 64 bit here, otherwise we'll have a problem
	   after watching a black picture for 5 hours */
	static uint64_t *yHistogram= NULL;
	if(!yHistogram)
	{
		yHistogram= new uint64_t[256];
		for(int i=0; i<256; i++) yHistogram[i]= width*height/64/256;
	}

	int black=0, white=255; // blackest black and whitest white in the picture

	if(!isColor)
	{
		uint64_t sum= 0;
		for(int i=0; i<256; i++)
			sum+= yHistogram[i];

		uint64_t maxClipped= (uint64_t)(sum * maxClippedThreshold);

		uint64_t clipped= sum;
		for(black=255; black>0; black--)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[black];
		}

		clipped= sum;
		for(white=0; white<256; white++)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[white];
		}

		// we can't handle negative corrections
		packedYOffset= MAX(black - minAllowedY, 0);
		packedYOffset|= packedYOffset<<32;
		packedYOffset|= packedYOffset<<16;
		packedYOffset|= packedYOffset<<8;

		double scale= (double)(maxAllowedY - minAllowedY) / (double)(white-black);
		packedYScale= uint16_t(scale*256.0 + 0.5);
		packedYScale|= packedYScale<<32;
		packedYScale|= packedYScale<<16;
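		/*
		 * packedYScale is an 8.8 fixed point factor replicated into all
		 * four words, e.g. black=32, white=208 with the default allowed
		 * range 0..255 gives scale = 255/176 ~ 1.45 -> 0x0173 per word.
		 */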
	}
	else
	{
		packedYScale= 0x0100010001000100LL;
		packedYOffset= 0;
	}

	for(int x=0; x<width; x+=BLOCK_SIZE)
		blockCopy(dst + x, dstStride, src + x, srcStride);

	for(int y=0; y<height; y+=BLOCK_SIZE)
	{
		//1% speedup if these are here instead of the inner loop
		uint8_t *srcBlock= &(src[y*srcStride]);
		uint8_t *dstBlock= &(dst[y*dstStride]);
		uint8_t *vertSrcBlock= &(srcBlock[srcStride*3]); // Blocks are 10x8 -> *3 to start
		uint8_t *vertBlock= &(dstBlock[dstStride*3]);

		// finish 1 block before the next, otherwise we might have a problem
		// with the L1 cache of the P4 ... or only do a few blocks at a time or something
		for(int x=0; x<width; x+=BLOCK_SIZE)
		{
			int QP= isColor ?
				QPs[(y>>3)*QPStride + (x>>3)]:
				(QPs[(y>>4)*QPStride + (x>>4)] * (packedYScale &0xFFFF))>>8;
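			// The luma QP is rescaled by the same 8.8 fixed point factor
			// as the pixels, so the deblocking thresholds keep up with the
			// stretched contrast.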
#ifdef HAVE_MMX
			asm volatile(
				"movd %0, %%mm7 \n\t"
				"packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP
				"packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP
				"packuswb %%mm7, %%mm7 \n\t" // QP,..., QP
				"movq %%mm7, pQPb \n\t"
				: : "r" (QP)
			);
#endif

			const int stride= dstStride;
			if(y + 12 < height)
			{
#ifdef MORE_TIMEING
				T0= rdtsc();
#endif
#ifdef HAVE_MMX2
				prefetchnta(vertSrcBlock + (((x>>3)&3) + 2)*srcStride + 32);
				prefetchnta(vertSrcBlock + (((x>>3)&3) + 6)*srcStride + 32);
				prefetcht0(vertBlock + (((x>>3)&3) + 2)*dstStride + 32);
				prefetcht0(vertBlock + (((x>>3)&3) + 6)*dstStride + 32);
#elif defined(HAVE_3DNOW)
//FIXME check if this is faster on a 3DNow! chip, or if it's faster without the prefetch or ...
/*				prefetch(vertSrcBlock + (((x>>3)&3) + 2)*srcStride + 32);
				prefetch(vertSrcBlock + (((x>>3)&3) + 6)*srcStride + 32);
				prefetchw(vertBlock + (((x>>3)&3) + 2)*dstStride + 32);
				prefetchw(vertBlock + (((x>>3)&3) + 6)*dstStride + 32);
*/
#endif
				if(!isColor) yHistogram[ srcBlock[0] ]++;

				blockCopy(vertBlock + dstStride*2, dstStride,
					vertSrcBlock + srcStride*2, srcStride);

#ifdef MORE_TIMEING
				T1= rdtsc();
				memcpyTime+= T1-T0;
				T0=T1;
#endif
				if(mode & V_DEBLOCK)
				{
					if(mode & RK_FILTER)
						vertRKFilter(vertBlock, stride, QP);
					else if(0)
						vertX1Filter(vertBlock, stride, QP);
					else
					{
						if( isVertDC(vertBlock, stride))
						{
							if(isVertMinMaxOk(vertBlock, stride, QP))
								doVertLowPass(vertBlock, stride, QP);
						}
						else
							doVertDefFilter(vertBlock, stride, QP);
					}
				}
#ifdef MORE_TIMEING
				T1= rdtsc();
				vertTime+= T1-T0;
				T0=T1;
#endif
			}
			else
			{
				for(int i=2; i<BLOCK_SIZE/2+1; i++) // last 10x8 Block is copied already so +2
					memcpy(	&(vertBlock[dstStride*i]),
						&(vertSrcBlock[srcStride*i]), BLOCK_SIZE);
			}

			if(x - 8 >= 0 && x<width)
			{
#ifdef MORE_TIMEING
				T0= rdtsc();
#endif
				if(mode & H_DEBLOCK)
				{
					if( isHorizDCAndCopy2Temp(dstBlock-4, stride))
					{
						if(isHorizMinMaxOk(tempBlock, TEMP_STRIDE, QP))
							doHorizLowPassAndCopyBack(dstBlock-4, stride, QP);
					}
					else
						doHorizDefFilterAndCopyBack(dstBlock-4, stride, QP);
				}
#ifdef MORE_TIMEING
				T1= rdtsc();
				horizTime+= T1-T0;
				T0=T1;
#endif
				dering(dstBlock - 9 - stride, stride, QP);
			}
			else if(y!=0)
				dering(dstBlock - stride*9 + width-9, stride, QP);
			//FIXME the dering filter will not be applied to the last block (bottom right)

			dstBlock+=8;
			srcBlock+=8;
			vertBlock+=8;
			vertSrcBlock+=8;
		}
	}

#ifdef HAVE_3DNOW
	asm volatile("femms");
#elif defined (HAVE_MMX)
	asm volatile("emms");
#endif

#ifdef TIMEING
	// FIXME diff is mostly the time spent in rdtsc (should subtract that, but ...)
	sumTime= rdtsc() - sumTime;
	if(!isColor)
		printf("cpy:%4dk, vert:%4dk, horiz:%4dk, sum:%4dk, diff:%4dk, color: %d/%d \r",
			int(memcpyTime/1000), int(vertTime/1000), int(horizTime/1000),
			int(sumTime/1000), int((sumTime-memcpyTime-vertTime-horizTime)/1000)
			, black, white);
#endif
}