You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2777 lines
78KB

  1. /*
  2. Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. /*
  16. C MMX MMX2 3DNow
  17. isVertDC Ec Ec
  18. isVertMinMaxOk Ec Ec
  19. doVertLowPass E e e
  20. doVertDefFilter Ec Ec Ec
  21. isHorizDC Ec Ec
  22. isHorizMinMaxOk a
  23. doHorizLowPass E a a
  24. doHorizDefFilter E ac ac
  25. deRing
  26. Vertical RKAlgo1 E a a
  27. Vertical X1 a E E
  28. Horizontal X1 a E E
  29. LinIpolDeinterlace a E E*
  30. LinBlendDeinterlace a E E*
  31. MedianDeinterlace Ec Ec
  32. * i dont have a 3dnow CPU -> its untested
  33. E = Exact implementation
  34. e = almost exact implementation
  35. a = alternative / approximate impl
  36. c = checked against the other implementations (-vo md5)
  37. */
  38. /*
  39. TODO:
  40. verify that everything works as it should (how?)
  41. reduce the time wasted on the mem transfer
  42. implement dering
  43. implement everything in C at least (done at the moment but ...)
  44. unroll stuff if instructions depend too much on the prior one
  45. we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
  46. move YScale thing to the end instead of fixing QP
  47. write a faster and higher quality deblocking filter :)
  48. do something about the speed of the horizontal filters
  49. make the mainloop more flexible (variable number of blocks at once
  50. (the if/else stuff per block is slowing things down)
  51. compare the quality & speed of all filters
  52. split this huge file
  53. fix warnings (unused vars, ...)
  54. noise reduction filters
  55. ...
  56. Notes:
  57. */
  58. //Changelog: use the CVS log
  59. #include <inttypes.h>
  60. #include <stdio.h>
  61. #include <stdlib.h>
  62. #include "../config.h"
  63. //#undef HAVE_MMX2
  64. //#define HAVE_3DNOW
  65. //#undef HAVE_MMX
  66. #include "postprocess.h"
/* Generic helpers.
   NOTE: classic double-evaluation macros -- do not pass expressions with
   side effects (e.g. MIN(i++, j) evaluates i++ twice). */
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define SIGN(a) ((a) > 0 ? 1 : -1) /* note: SIGN(0) == -1 */

/* PAVGB(a,b): rounded packed-byte average, emitted as the MMX2 "pavgb"
   or the 3DNow! "pavgusb" instruction depending on build flags. */
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

/* 64-bit packed constants referenced BY NAME as memory operands from the
   inline MMX asm below -- that is why they are file-scope globals.
   Naming scheme:
     wXXXX      : 16-bit word XXXX replicated into 4 words
     bXX        : byte XX replicated into 8 bytes
     bmDDDDDDDD : byte mask; each binary digit selects one byte (1 -> 0xFF) */
static uint64_t packedYOffset= 0x0000000000000000LL; // luma offset (4 words)
static uint64_t packedYScale= 0x0100010001000100LL; // luma scale, 8.8 fixed point (1.0)
static uint64_t w05= 0x0005000500050005LL;
static uint64_t w20= 0x0020002000200020LL;
static uint64_t w1400= 0x1400140014001400LL;
static uint64_t bm00000001= 0x00000000000000FFLL;
static uint64_t bm00010000= 0x000000FF00000000LL;
static uint64_t bm00001000= 0x00000000FF000000LL;
static uint64_t bm10000000= 0xFF00000000000000LL;
static uint64_t bm10000001= 0xFF000000000000FFLL;
static uint64_t bm11000011= 0xFFFF00000000FFFFLL;
static uint64_t bm00000011= 0x000000000000FFFFLL;
static uint64_t bm11111110= 0xFFFFFFFFFFFFFF00LL;
static uint64_t bm11000000= 0xFFFF000000000000LL;
static uint64_t bm00011000= 0x000000FFFF000000LL;
static uint64_t bm00110011= 0x0000FFFF0000FFFFLL;
static uint64_t bm11001100= 0xFFFF0000FFFF0000LL;
static uint64_t b00= 0x0000000000000000LL;
static uint64_t b01= 0x0101010101010101LL;
static uint64_t b02= 0x0202020202020202LL;
static uint64_t b0F= 0x0F0F0F0F0F0F0F0FLL;
static uint64_t bFF= 0xFFFFFFFFFFFFFFFFLL;
static uint64_t b20= 0x2020202020202020LL;
static uint64_t b80= 0x8080808080808080LL;
static uint64_t b7E= 0x7E7E7E7E7E7E7E7ELL;
static uint64_t b7C= 0x7C7C7C7C7C7C7C7CLL;
static uint64_t b3F= 0x3F3F3F3F3F3F3F3FLL;
/* scratch slots for the asm routines (spill space addressed by name) */
static uint64_t temp0=0;
static uint64_t temp1=0;
static uint64_t temp2=0;
static uint64_t temp3=0;
static uint64_t temp4=0;
static uint64_t temp5=0;
static uint64_t pQPb=0; // current QP replicated into all 8 bytes; set by the caller
static uint8_t tempBlock[16*16]; // scratch block for the horizontal filters

/* Flatness thresholds: max byte-pair count is 7 lines * 8 columns = 56;
   a block is "flat" when more than 56-16 = 40 neighbour pairs are equal. */
int hFlatnessThreshold= 56 - 16;
int vFlatnessThreshold= 56 - 16;
//amount of "black" you are willing to lose to get a brightness-corrected picture
double maxClippedThreshold= 0.01;
int maxAllowedY=255;
//FIXME can never make a movie's black brighter (anyone needs that?)
int minAllowedY=0;
#ifdef TIMING
/**
 * Read the CPU time-stamp counter (x86 RDTSC).
 * The "=A" constraint returns the 64-bit EDX:EAX pair as a long long
 * (ia32 only). Used for ad-hoc profiling when TIMING is defined.
 */
static inline long long rdtsc()
{
	long long l;
	asm volatile( "rdtsc\n\t"
		: "=A" (l)
	);
//	printf("%d\n", int(l/1000));
	return l;
}
#endif
#ifdef HAVE_MMX2
/* Cache-prefetch wrappers around the SSE prefetch instructions.
   Each issues a single hint for the cache line containing *p:
     prefetchnta - non-temporal (bypass/minimize cache pollution)
     prefetcht0  - into all cache levels
     prefetcht1  - into L2 and up
     prefetcht2  - into L3 and up (on CPUs that distinguish it) */
static inline void prefetchnta(void *p)
{
	asm volatile( "prefetchnta (%0)\n\t"
		: : "r" (p)
	);
}
static inline void prefetcht0(void *p)
{
	asm volatile( "prefetcht0 (%0)\n\t"
		: : "r" (p)
	);
}
static inline void prefetcht1(void *p)
{
	asm volatile( "prefetcht1 (%0)\n\t"
		: : "r" (p)
	);
}
static inline void prefetcht2(void *p)
{
	asm volatile( "prefetcht2 (%0)\n\t"
		: : "r" (p)
	);
}
#endif
//FIXME? |255-0| = 1 (shouldnt be a problem ...)
/**
 * Check if the middle 8x8 block in the given 8x10 block is flat.
 * Counts vertically adjacent byte pairs whose difference is within +-1
 * (7 line pairs x 8 columns = up to 56) and returns 1 when the count
 * exceeds vFlatnessThreshold.
 *
 * MMX trick: d = a-b (wrapping byte subtract); then (d + 0x7E) compared
 * signed against 0x7C is true exactly for d in {-1,0,1}, producing 0xFF
 * (== -1) per matching byte which paddb accumulates per lane; the lanes
 * are then summed horizontally and the (negative, mod 256) total is
 * turned back into a count below.
 */
static inline int isVertDC(uint8_t src[], int stride){
	int numEq= 0;
	int y; // used only by the C fall-back (unused-var warning under HAVE_MMX; see TODO)
	src+= stride; // src points to begin of the 8x8 Block
#ifdef HAVE_MMX
	asm volatile(
		"pushl %1\n\t" // %1 is advanced through the lines; save/restore it
		"movq b7E, %%mm7 \n\t" // mm7 = 0x7E in every byte
		"movq b7C, %%mm6 \n\t" // mm6 = 0x7C in every byte
		"movq (%1), %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm1 \n\t"
		"psubb %%mm1, %%mm0 \n\t" // mm0 = difference of adjacent lines
		"paddb %%mm7, %%mm0 \n\t"
		"pcmpgtb %%mm6, %%mm0 \n\t" // 0xFF where |diff| <= 1
		"addl %2, %1 \n\t"
		"movq (%1), %%mm2 \n\t"
		"psubb %%mm2, %%mm1 \n\t"
		"paddb %%mm7, %%mm1 \n\t"
		"pcmpgtb %%mm6, %%mm1 \n\t"
		"paddb %%mm1, %%mm0 \n\t" // accumulate -1 per match
		"addl %2, %1 \n\t"
		"movq (%1), %%mm1 \n\t"
		"psubb %%mm1, %%mm2 \n\t"
		"paddb %%mm7, %%mm2 \n\t"
		"pcmpgtb %%mm6, %%mm2 \n\t"
		"paddb %%mm2, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm2 \n\t"
		"psubb %%mm2, %%mm1 \n\t"
		"paddb %%mm7, %%mm1 \n\t"
		"pcmpgtb %%mm6, %%mm1 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm1 \n\t"
		"psubb %%mm1, %%mm2 \n\t"
		"paddb %%mm7, %%mm2 \n\t"
		"pcmpgtb %%mm6, %%mm2 \n\t"
		"paddb %%mm2, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm2 \n\t"
		"psubb %%mm2, %%mm1 \n\t"
		"paddb %%mm7, %%mm1 \n\t"
		"pcmpgtb %%mm6, %%mm1 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"addl %2, %1 \n\t"
		"movq (%1), %%mm1 \n\t"
		"psubb %%mm1, %%mm2 \n\t"
		"paddb %%mm7, %%mm2 \n\t"
		"pcmpgtb %%mm6, %%mm2 \n\t"
		"paddb %%mm2, %%mm0 \n\t"
		" \n\t"
		// horizontal byte-sum of the 8 lanes into the low byte of mm0
		"movq %%mm0, %%mm1 \n\t"
		"psrlw $8, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $16, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $32, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"popl %1\n\t"
		"movd %%mm0, %0 \n\t"
		: "=r" (numEq)
		: "r" (src), "r" (stride)
	);
//	printf("%d\n", numEq);
	// low byte holds -count mod 256; recover the count (max 56, so no overflow)
	numEq= (256 - (numEq & 0xFF)) &0xFF;
//	int asmEq= numEq;
//	numEq=0;
//	uint8_t *temp= src;
#else
	for(y=0; y<BLOCK_SIZE-1; y++)
	{
		// (diff+1)&0xFFFF < 3 <=> diff in {-1,0,1}; the mask turns
		// negative diffs into large unsigned values that fail the test
		if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
		src+= stride;
	}
#endif
/*	if(abs(numEq - asmEq) > 0)
	{
		printf("\nasm:%d c:%d\n", asmEq, numEq);
		for(int y=0; y<8; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", temp[x + y*stride]);
			}
			printf("\n");
		}
	}
*/
//	for(int i=0; i<numEq/8; i++) src[i]=255;
	return (numEq > vFlatnessThreshold) ? 1 : 0;
}
/**
 * Check that the block has no vertical edge stronger than the quantizer:
 * returns 1 when |src[x+stride] - src[x+8*stride]| <= 2*QP for all 8
 * columns (i.e. first vs. last line of the 8x8 block), 0 otherwise.
 * Deblocking is only safe on such blocks.
 */
static inline int isVertMinMaxOk(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
	int isOk;
	asm volatile(
//		"int $3 \n\t"
		"movq (%1, %2), %%mm0 \n\t" // line 1
		"movq (%1, %2, 8), %%mm1 \n\t" // line 8
		"movq %%mm0, %%mm2 \n\t"
		"psubusb %%mm1, %%mm0 \n\t"
		"psubusb %%mm2, %%mm1 \n\t"
		"por %%mm1, %%mm0 \n\t" // ABS Diff
		"movq pQPb, %%mm7 \n\t" // QP,..., QP
		"paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
		"psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
		// NOTE(review): the double pcmpeqd + shift below collapses the
		// two dwords into an all-ones / not-all-ones flag; the final movd
		// is nonzero only when every byte diff was <= 2QP -- confirm
		// against the C fall-back (-vo md5 per the table at the top).
		"pcmpeqd b00, %%mm0 \n\t"
		"psrlq $16, %%mm0 \n\t"
		"pcmpeqd bFF, %%mm0 \n\t"
//		"movd %%mm0, (%1, %2, 4)\n\t"
		"movd %%mm0, %0 \n\t"
		: "=r" (isOk)
		: "r" (src), "r" (stride)
	);
	return isOk ? 1 : 0;
#else
	int isOk2= 1;
	int x;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=0;
	}
/*	if(isOk && !isOk2 || !isOk && isOk2)
	{
		printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
		for(int y=0; y<9; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", src[x + y*stride]);
			}
			printf("\n");
		}
	} */
	return isOk2;
#endif
}
/**
 * Do a vertical low pass filter on the 8x10 block (only write to the 8x8 block in the middle)
 * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16.
 * The lines above/below the 8x8 block are mirrored into it only when they
 * differ from the boundary line by less than QP (see first/last below),
 * so real block edges are smoothed but true image edges are kept.
 * The MMX2/3DNow! path approximates the taps with chains of rounded
 * byte averages (PAVGB), trashing %eax/%ebx as extra line pointers.
 */
static inline void doVertLowPass(uint8_t *src, int stride, int QP)
{
//	QP= 64;
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
//#ifdef HAVE_MMX2
	asm volatile( //"movv %0 %1 %2\n\t"
		"pushl %0 \n\t" // %0 is advanced below; save/restore it
		"movq pQPb, %%mm0 \n\t" // QP,..., QP
//		"movq bFF , %%mm0 \n\t" // QP,..., QP
		"movq (%0), %%mm6 \n\t" // line 0 (above the block)
		"movq (%0, %1), %%mm5 \n\t" // line 1
		"movq %%mm5, %%mm1 \n\t"
		"movq %%mm6, %%mm2 \n\t"
		"psubusb %%mm6, %%mm5 \n\t"
		"psubusb %%mm1, %%mm2 \n\t"
		"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
		"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
		"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
		"pand %%mm2, %%mm6 \n\t"
		"pandn %%mm1, %%mm2 \n\t"
		"por %%mm2, %%mm6 \n\t" // first line to filter (line 0 if close, else mirror of line 1)
		"movq (%0, %1, 8), %%mm5 \n\t"
		"leal (%0, %1, 4), %%eax \n\t"
		"leal (%0, %1, 8), %%ebx \n\t"
		"subl %1, %%ebx \n\t"
		"addl %1, %0 \n\t" // %0 points to line 1 not 0
		"movq (%0, %1, 8), %%mm7 \n\t" // line 9 (below the block)
		"movq %%mm5, %%mm1 \n\t"
		"movq %%mm7, %%mm2 \n\t"
		"psubusb %%mm7, %%mm5 \n\t"
		"psubusb %%mm1, %%mm2 \n\t"
		"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
		"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
		"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
		"pand %%mm2, %%mm7 \n\t"
		"pandn %%mm1, %%mm2 \n\t"
		"por %%mm2, %%mm7 \n\t" // last line to filter (mirrored like the first)
		// 1 2 3 4 5 6 7 8
		// %0 %0+%1 %0+2%1 eax %0+4%1 eax+2%1 ebx eax+4%1
		// 6 4 2 2 1 1
		// 6 4 4 2
		// 6 8 2
/*
		"movq %%mm6, %%mm2 \n\t" //1
		"movq %%mm6, %%mm3 \n\t" //1
		"paddusb b02, %%mm3 \n\t"
		"psrlw $2, %%mm3 \n\t" //1 /4
		"pand b3F, %%mm3 \n\t"
		"psubb %%mm3, %%mm2 \n\t"
		"movq (%0, %1), %%mm0 \n\t" // 1
		"movq %%mm0, %%mm1 \n\t" // 1
		"paddusb b02, %%mm0 \n\t"
		"psrlw $2, %%mm0 \n\t" // 1 /4
		"pand b3F, %%mm0 \n\t"
		"paddusb %%mm2, %%mm0 \n\t" //3 1 /4
*/
		// tap weights are built up by nested averages; the /N comments
		// track each register's accumulated weight over N
		"movq (%0, %1), %%mm0 \n\t" // 1
		"movq %%mm0, %%mm1 \n\t" // 1
		PAVGB(%%mm6, %%mm0) //1 1 /2
		PAVGB(%%mm6, %%mm0) //3 1 /4
		"movq (%0, %1, 4), %%mm2 \n\t" // 1
		"movq %%mm2, %%mm5 \n\t" // 1
		PAVGB((%%eax), %%mm2) // 11 /2
		PAVGB((%0, %1, 2), %%mm2) // 211 /4
		"movq %%mm2, %%mm3 \n\t" // 211 /4
		"movq (%0), %%mm4 \n\t" // 1
		PAVGB(%%mm4, %%mm3) // 4 211 /8
		PAVGB(%%mm0, %%mm3) //642211 /16
		"movq %%mm3, (%0) \n\t" // X
		// mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
		"movq %%mm1, %%mm0 \n\t" // 1
		PAVGB(%%mm6, %%mm0) //1 1 /2
		"movq %%mm4, %%mm3 \n\t" // 1
		PAVGB((%0,%1,2), %%mm3) // 1 1 /2
		PAVGB((%%eax,%1,2), %%mm5) // 11 /2
		PAVGB((%%eax), %%mm5) // 211 /4
		PAVGB(%%mm5, %%mm3) // 2 2211 /8
		PAVGB(%%mm0, %%mm3) //4242211 /16
		"movq %%mm3, (%0,%1) \n\t" // X
		// mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
		PAVGB(%%mm4, %%mm6) //11 /2
		"movq (%%ebx), %%mm0 \n\t" // 1
		PAVGB((%%eax, %1, 2), %%mm0) // 11/2
		"movq %%mm0, %%mm3 \n\t" // 11/2
		PAVGB(%%mm1, %%mm0) // 2 11/4
		PAVGB(%%mm6, %%mm0) //222 11/8
		PAVGB(%%mm2, %%mm0) //22242211/16
		"movq (%0, %1, 2), %%mm2 \n\t" // 1
		"movq %%mm0, (%0, %1, 2) \n\t" // X
		// mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
		"movq (%%eax, %1, 4), %%mm0 \n\t" // 1
		PAVGB((%%ebx), %%mm0) // 11 /2
		PAVGB(%%mm0, %%mm6) //11 11 /4
		PAVGB(%%mm1, %%mm4) // 11 /2
		PAVGB(%%mm2, %%mm1) // 11 /2
		PAVGB(%%mm1, %%mm6) //1122 11 /8
		PAVGB(%%mm5, %%mm6) //112242211 /16
		"movq (%%eax), %%mm5 \n\t" // 1
		"movq %%mm6, (%%eax) \n\t" // X
		// mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
		"movq (%%eax, %1, 4), %%mm6 \n\t" // 1
		PAVGB(%%mm7, %%mm6) // 11 /2
		PAVGB(%%mm4, %%mm6) // 11 11 /4
		PAVGB(%%mm3, %%mm6) // 11 2211 /8
		PAVGB(%%mm5, %%mm2) // 11 /2
		"movq (%0, %1, 4), %%mm4 \n\t" // 1
		PAVGB(%%mm4, %%mm2) // 112 /4
		PAVGB(%%mm2, %%mm6) // 112242211 /16
		"movq %%mm6, (%0, %1, 4) \n\t" // X
		// mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
		PAVGB(%%mm7, %%mm1) // 11 2 /4
		PAVGB(%%mm4, %%mm5) // 11 /2
		PAVGB(%%mm5, %%mm0) // 11 11 /4
		"movq (%%eax, %1, 2), %%mm6 \n\t" // 1
		PAVGB(%%mm6, %%mm1) // 11 4 2 /8
		PAVGB(%%mm0, %%mm1) // 11224222 /16
//		"pxor %%mm1, %%mm1 \n\t"
		"movq %%mm1, (%%eax, %1, 2) \n\t" // X
		// mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
		PAVGB((%%ebx), %%mm2) // 112 4 /8
		"movq (%%eax, %1, 4), %%mm0 \n\t" // 1
		PAVGB(%%mm0, %%mm6) // 1 1 /2
		PAVGB(%%mm7, %%mm6) // 1 12 /4
		PAVGB(%%mm2, %%mm6) // 1122424 /4
//		"pxor %%mm6, %%mm6 \n\t"
		"movq %%mm6, (%%ebx) \n\t" // X
		// mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
		PAVGB(%%mm7, %%mm5) // 11 2 /4
		PAVGB(%%mm7, %%mm5) // 11 6 /8
		PAVGB(%%mm3, %%mm0) // 112 /4
		PAVGB(%%mm0, %%mm5) // 112246 /16
//		"pxor %%mm5, %%mm5 \n\t"
//		"movq pQPb, %%mm5 \n\t"
		"movq %%mm5, (%%eax, %1, 4) \n\t" // X
		"popl %0\n\t"
		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	// lN = byte offset of line N (N*stride)
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;
	int x;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		// mirror the line above/below into the block only when it is
		// within QP of the boundary line (don't smooth real edges)
		const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
		const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];
		int sums[9]; // sums[i] = pair sum of lines i and i+1 (with mirroring)
		sums[0] = first + src[l1];
		sums[1] = src[l1] + src[l2];
		sums[2] = src[l2] + src[l3];
		sums[3] = src[l3] + src[l4];
		sums[4] = src[l4] + src[l5];
		sums[5] = src[l5] + src[l6];
		sums[6] = src[l6] + src[l7];
		sums[7] = src[l7] + src[l8];
		sums[8] = src[l8] + last;
		// NOTE: "<<" binds looser than "+", so e.g. (a + b + c<<1) below is
		// ((a+b+c)<<1) -- this is INTENTIONAL; expanding the expressions
		// reproduces exactly the (1,1,2,2,4,2,2,1,1)/16 tap weights.
		src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
		src[l2]= ((src[l2]<<2) + (first + sums[0] + sums[3]<<1) + sums[5] + 8)>>4;
		src[l3]= ((src[l3]<<2) + (first + sums[1] + sums[4]<<1) + sums[6] + 8)>>4;
		src[l4]= ((src[l4]<<2) + (sums[2] + sums[5]<<1) + sums[0] + sums[7] + 8)>>4;
		src[l5]= ((src[l5]<<2) + (sums[3] + sums[6]<<1) + sums[1] + sums[8] + 8)>>4;
		src[l6]= ((src[l6]<<2) + (last + sums[7] + sums[4]<<1) + sums[2] + 8)>>4;
		src[l7]= ((last + src[l7]<<2) + (src[l8] + sums[5]<<1) + sums[3] + 8)>>4;
		src[l8]= ((sums[8]<<2) + (last + sums[6]<<1) + sums[4] + 8)>>4;
		src++;
	}
#endif
}
/**
 * Experimental implementation of the filter (Algorithm 1) described in a paper from Ramkishor & Karandikar.
 * values are correctly clipped (MMX2)
 * values are wraparound (C)
 * conclusion: it's fast, but introduces ugly horizontal patterns if there is a continuous gradient
 *
 * If |l4-l5| < QP*1.25, distributes the step v = l5-l4 across the edge:
 *   l3 += v/8, l4 += v/2, l5 -= v/2, l6 -= v/8
 *	0	8	16	24
 *	x = 8
 *	x/2 = 4
 *	x/8 = 1
 *	1 12 12 23
 */
static inline void vertRK1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
// FIXME rounding
	asm volatile(
		"pxor %%mm7, %%mm7 \n\t" // 0
		"movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
		// 0 1 2 3 4 5 6 7 8 9
		// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
		"movq pQPb, %%mm0 \n\t" // QP,..., QP
		"movq %%mm0, %%mm1 \n\t" // QP,..., QP
		"paddusb b02, %%mm0 \n\t"
		"psrlw $2, %%mm0 \n\t"
		"pand b3F, %%mm0 \n\t" // QP/4,..., QP/4
		"paddusb %%mm1, %%mm0 \n\t" // QP*1.25 ...
		"movq (%0, %1, 4), %%mm2 \n\t" // line 4
		"movq (%%ebx), %%mm3 \n\t" // line 5
		"movq %%mm2, %%mm4 \n\t" // line 4
		"pcmpeqb %%mm5, %%mm5 \n\t" // -1
		"pxor %%mm2, %%mm5 \n\t" // -line 4 - 1
		PAVGB(%%mm3, %%mm5) // signed average trick: (l5-l4)/2 biased by 0x80
		"paddb %%mm6, %%mm5 \n\t" // (l5-l4)/2
		"psubusb %%mm3, %%mm4 \n\t"
		"psubusb %%mm2, %%mm3 \n\t"
		"por %%mm3, %%mm4 \n\t" // |l4 - l5|
		"psubusb %%mm0, %%mm4 \n\t" // 0 where |l4-l5| <= QP*1.25
		"pcmpeqb %%mm7, %%mm4 \n\t" // mask: FF where the filter applies
		"pand %%mm4, %%mm5 \n\t" // d/2
//		"paddb %%mm6, %%mm2 \n\t" // line 4 + 0x80
		"paddb %%mm5, %%mm2 \n\t" // l4 += d/2
//		"psubb %%mm6, %%mm2 \n\t"
		"movq %%mm2, (%0,%1, 4) \n\t"
		"movq (%%ebx), %%mm2 \n\t"
//		"paddb %%mm6, %%mm2 \n\t" // line 5 + 0x80
		"psubb %%mm5, %%mm2 \n\t" // l5 -= d/2
//		"psubb %%mm6, %%mm2 \n\t"
		"movq %%mm2, (%%ebx) \n\t"
		"paddb %%mm6, %%mm5 \n\t"
		"psrlw $2, %%mm5 \n\t"
		"pand b3F, %%mm5 \n\t"
		"psubb b20, %%mm5 \n\t" // (l5-l4)/8
		"movq (%%eax, %1, 2), %%mm2 \n\t"
		"paddb %%mm6, %%mm2 \n\t" // line 3 + 0x80
		"paddsb %%mm5, %%mm2 \n\t" // l3 += d/8 (saturated in signed domain)
		"psubb %%mm6, %%mm2 \n\t"
		"movq %%mm2, (%%eax, %1, 2) \n\t"
		"movq (%%ebx, %1), %%mm2 \n\t"
		"paddb %%mm6, %%mm2 \n\t" // line 6 + 0x80
		"psubsb %%mm5, %%mm2 \n\t" // l6 -= d/8
		"psubb %%mm6, %%mm2 \n\t"
		"movq %%mm2, (%%ebx, %1) \n\t"
		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	// lN = byte offset of line N (N*stride)
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;
	int x;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		if(ABS(src[l4]-src[l5]) < QP + QP/4)
		{
			int v = (src[l5] - src[l4]);
			// note: no clipping here (can wrap) -- see header comment
			src[l3] +=v/8;
			src[l4] +=v/2;
			src[l5] -=v/2;
			src[l6] -=v/8;
		}
		src++;
	}
#endif
}
/**
 * Experimental Filter 1
 * will not damage linear gradients
 * Flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
 * can only smooth blocks at the expected locations (it can't smooth them if they moved)
 * MMX2 version does correct clipping, C version doesn't
 *
 * Per column: d = MAX(0, |l4-l5| - (|l3-l4| + |l5-l6|)/2); if d < QP the
 * step between lines 4 and 5 is spread over lines 2..7 with weights
 * 1/8, 1/4, 3/8, -3/8, -1/4, -1/8 (times sign(l5-l4)).
 */
static inline void vertX1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	asm volatile(
		"pxor %%mm7, %%mm7 \n\t" // 0
//		"movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
		// 0 1 2 3 4 5 6 7 8 9
		// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
		"movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
		"movq (%0, %1, 4), %%mm1 \n\t" // line 4
		"movq %%mm1, %%mm2 \n\t" // line 4
		"psubusb %%mm0, %%mm1 \n\t"
		"psubusb %%mm2, %%mm0 \n\t"
		"por %%mm1, %%mm0 \n\t" // |l3 - l4| (source comment said l2/l3)
		"movq (%%ebx), %%mm3 \n\t" // line 5
		"movq (%%ebx, %1), %%mm4 \n\t" // line 6
		"movq %%mm3, %%mm5 \n\t" // line 5
		"psubusb %%mm4, %%mm3 \n\t"
		"psubusb %%mm5, %%mm4 \n\t"
		"por %%mm4, %%mm3 \n\t" // |l5 - l6|
		PAVGB(%%mm3, %%mm0) // (|l3 - l4| + |l5 - l6|)/2
		"movq %%mm2, %%mm1 \n\t" // line 4
		"psubusb %%mm5, %%mm2 \n\t"
		"movq %%mm2, %%mm4 \n\t"
		"pcmpeqb %%mm7, %%mm2 \n\t" // (l4 - l5) <= 0 ? -1 : 0
		"psubusb %%mm1, %%mm5 \n\t"
		"por %%mm5, %%mm4 \n\t" // |l4 - l5|
		"psubusb %%mm0, %%mm4 \n\t" //d = MAX(0, |l4-l5| - (|l3-l4| + |l5-l6|)/2)
		"movq %%mm4, %%mm3 \n\t" // d
		"psubusb pQPb, %%mm4 \n\t"
		"pcmpeqb %%mm7, %%mm4 \n\t" // d <= QP ? -1 : 0
		"psubusb b01, %%mm3 \n\t"
		"pand %%mm4, %%mm3 \n\t" // d <= QP ? d : 0
		PAVGB(%%mm7, %%mm3) // d/2
		"movq %%mm3, %%mm1 \n\t" // d/2
		PAVGB(%%mm7, %%mm3) // d/4
		PAVGB(%%mm1, %%mm3) // 3*d/8
		// mm2 holds the sign mask; xor/op/xor implements a signed
		// add/subtract of the (unsigned) delta with saturation
		"movq (%0, %1, 4), %%mm0 \n\t" // line 4
		"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
		"psubusb %%mm3, %%mm0 \n\t"
		"pxor %%mm2, %%mm0 \n\t"
		"movq %%mm0, (%0, %1, 4) \n\t" // line 4
		"movq (%%ebx), %%mm0 \n\t" // line 5
		"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
		"paddusb %%mm3, %%mm0 \n\t"
		"pxor %%mm2, %%mm0 \n\t"
		"movq %%mm0, (%%ebx) \n\t" // line 5
		PAVGB(%%mm7, %%mm1) // d/4
		"movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
		"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
		"psubusb %%mm1, %%mm0 \n\t"
		"pxor %%mm2, %%mm0 \n\t"
		"movq %%mm0, (%%eax, %1, 2) \n\t" // line 3
		"movq (%%ebx, %1), %%mm0 \n\t" // line 6
		"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
		"paddusb %%mm1, %%mm0 \n\t"
		"pxor %%mm2, %%mm0 \n\t"
		"movq %%mm0, (%%ebx, %1) \n\t" // line 6
		PAVGB(%%mm7, %%mm1) // d/8
		"movq (%%eax, %1), %%mm0 \n\t" // line 2
		"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
		"psubusb %%mm1, %%mm0 \n\t"
		"pxor %%mm2, %%mm0 \n\t"
		"movq %%mm0, (%%eax, %1) \n\t" // line 2
		"movq (%%ebx, %1, 2), %%mm0 \n\t" // line 7
		"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
		"paddusb %%mm1, %%mm0 \n\t"
		"pxor %%mm2, %%mm0 \n\t"
		"movq %%mm0, (%%ebx, %1, 2) \n\t" // line 7
		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	// lN = byte offset of line N (N*stride)
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;
	int x;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		int a= src[l3] - src[l4];
		int b= src[l4] - src[l5];
		int c= src[l5] - src[l6];
		int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
		if(d < QP)
		{
			int v = d * SIGN(-b);
			// no clipping here (can wrap) -- see header comment
			src[l2] +=v/8;
			src[l3] +=v/4;
			src[l4] +=3*v/8;
			src[l5] -=3*v/8;
			src[l6] -=v/4;
			src[l7] -=v/8;
		}
		src++;
	}
/*
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;
	for(int x=0; x<BLOCK_SIZE; x++)
	{
		int v2= src[l2];
		int v3= src[l3];
		int v4= src[l4];
		int v5= src[l5];
		int v6= src[l6];
		int v7= src[l7];
		if(ABS(v4-v5)<QP && ABS(v4-v5) - (ABS(v3-v4) + ABS(v5-v6))>0 )
		{
			src[l3] = (6*v2 + 4*v3 + 3*v4 + 2*v5 + v6 )/16;
			src[l4] = (3*v2 + 3*v3 + 4*v4 + 3*v5 + 2*v6 + v7 )/16;
			src[l5] = (1*v2 + 2*v3 + 3*v4 + 4*v5 + 3*v6 + 3*v7)/16;
			src[l6] = ( 1*v3 + 2*v4 + 3*v5 + 4*v6 + 6*v7)/16;
		}
		src++;
	}
*/
#endif
}
  720. /**
  721. * Experimental Filter 1 (Horizontal)
  722. * will not damage linear gradients
  723. * Flat blocks should look like they where passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
  724. * can only smooth blocks at the expected locations (it cant smooth them if they did move)
  725. * MMX2 version does correct clipping C version doesnt
  726. * not identical with the vertical one
  727. */
  728. static inline void horizX1Filter(uint8_t *src, int stride, int QP)
  729. {
  730. int y;
  731. static uint64_t *lut= NULL;
  732. if(lut==NULL)
  733. {
  734. int i;
  735. lut= (uint64_t*)memalign(8, 256*8);
  736. for(i=0; i<256; i++)
  737. {
  738. int v= i < 128 ? 2*i : 2*(i-256);
  739. /*
  740. //Simulate 112242211 9-Tap filter
  741. uint64_t a= (v/16) & 0xFF;
  742. uint64_t b= (v/8) & 0xFF;
  743. uint64_t c= (v/4) & 0xFF;
  744. uint64_t d= (3*v/8) & 0xFF;
  745. */
  746. //Simulate piecewise linear interpolation
  747. uint64_t a= (v/16) & 0xFF;
  748. uint64_t b= (v*3/16) & 0xFF;
  749. uint64_t c= (v*5/16) & 0xFF;
  750. uint64_t d= (7*v/16) & 0xFF;
  751. uint64_t A= (0x100 - a)&0xFF;
  752. uint64_t B= (0x100 - b)&0xFF;
  753. uint64_t C= (0x100 - c)&0xFF;
  754. uint64_t D= (0x100 - c)&0xFF;
  755. lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
  756. (D<<24) | (C<<16) | (B<<8) | (A);
  757. //lut[i] = (v<<32) | (v<<24);
  758. }
  759. }
  760. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  761. asm volatile(
  762. "pxor %%mm7, %%mm7 \n\t" // 0
  763. // "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
  764. "leal (%0, %1), %%eax \n\t"
  765. "leal (%%eax, %1, 4), %%ebx \n\t"
  766. "movq b80, %%mm6 \n\t"
  767. "movd pQPb, %%mm5 \n\t" // QP
  768. "movq %%mm5, %%mm4 \n\t"
  769. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  770. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  771. "pxor %%mm5, %%mm5 \n\t" // 0
  772. "psubb %%mm4, %%mm5 \n\t" // -3QP
  773. "por bm11111110, %%mm5 \n\t" // ...,FF,FF,-3QP
  774. "psllq $24, %%mm5 \n\t"
  775. // 0 1 2 3 4 5 6 7 8 9
  776. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  777. #define HX1old(a) \
  778. "movd " #a ", %%mm0 \n\t"\
  779. "movd 4" #a ", %%mm1 \n\t"\
  780. "punpckldq %%mm1, %%mm0 \n\t"\
  781. "movq %%mm0, %%mm1 \n\t"\
  782. "movq %%mm0, %%mm2 \n\t"\
  783. "psrlq $8, %%mm1 \n\t"\
  784. "psubusb %%mm1, %%mm2 \n\t"\
  785. "psubusb %%mm0, %%mm1 \n\t"\
  786. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  787. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  788. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  789. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  790. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  791. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  792. "paddb %%mm5, %%mm1 \n\t"\
  793. "psubusb %%mm5, %%mm1 \n\t"\
  794. PAVGB(%%mm7, %%mm1)\
  795. "pxor %%mm2, %%mm1 \n\t"\
  796. "psubb %%mm2, %%mm1 \n\t"\
  797. "psrlq $24, %%mm1 \n\t"\
  798. "movd %%mm1, %%ecx \n\t"\
  799. "paddb %%mm6, %%mm0 \n\t"\
  800. "paddsb (%3, %%ecx, 8), %%mm0 \n\t"\
  801. "paddb %%mm6, %%mm0 \n\t"\
  802. "movq %%mm0, " #a " \n\t"\
  803. /*
  804. HX1old((%0))
  805. HX1old((%%eax))
  806. HX1old((%%eax, %1))
  807. HX1old((%%eax, %1, 2))
  808. HX1old((%0, %1, 4))
  809. HX1old((%%ebx))
  810. HX1old((%%ebx, %1))
  811. HX1old((%%ebx, %1, 2))
  812. */
  813. //FIXME add some comments, its unreadable ...
  814. #define HX1b(a, c, b, d) \
  815. "movd " #a ", %%mm0 \n\t"\
  816. "movd 4" #a ", %%mm1 \n\t"\
  817. "punpckldq %%mm1, %%mm0 \n\t"\
  818. "movd " #b ", %%mm4 \n\t"\
  819. "movq %%mm0, %%mm1 \n\t"\
  820. "movq %%mm0, %%mm2 \n\t"\
  821. "psrlq $8, %%mm1 \n\t"\
  822. "movd 4" #b ", %%mm3 \n\t"\
  823. "psubusb %%mm1, %%mm2 \n\t"\
  824. "psubusb %%mm0, %%mm1 \n\t"\
  825. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  826. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  827. "punpckldq %%mm3, %%mm4 \n\t"\
  828. "movq %%mm1, %%mm3 \n\t"\
  829. "psllq $32, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  830. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  831. "paddb %%mm6, %%mm0 \n\t"\
  832. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  833. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  834. "movq %%mm4, %%mm3 \n\t"\
  835. "paddb %%mm5, %%mm1 \n\t"\
  836. "psubusb %%mm5, %%mm1 \n\t"\
  837. "psrlq $8, %%mm3 \n\t"\
  838. PAVGB(%%mm7, %%mm1)\
  839. "pxor %%mm2, %%mm1 \n\t"\
  840. "psubb %%mm2, %%mm1 \n\t"\
  841. "movq %%mm4, %%mm2 \n\t"\
  842. "psrlq $24, %%mm1 \n\t"\
  843. "psubusb %%mm3, %%mm2 \n\t"\
  844. "movd %%mm1, %%ecx \n\t"\
  845. "psubusb %%mm4, %%mm3 \n\t"\
  846. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  847. "por %%mm2, %%mm3 \n\t" /* p´x = |px - p(x+1)| */\
  848. "paddb %%mm6, %%mm0 \n\t"\
  849. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  850. "movq %%mm3, %%mm1 \n\t"\
  851. "psllq $32, %%mm1 \n\t" /* p´5 = |p1 - p2| */\
  852. "movq %%mm0, " #a " \n\t"\
  853. PAVGB(%%mm3, %%mm1) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  854. "paddb %%mm6, %%mm4 \n\t"\
  855. "psrlq $16, %%mm1 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  856. "psubusb %%mm1, %%mm3 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  857. "paddb %%mm5, %%mm3 \n\t"\
  858. "psubusb %%mm5, %%mm3 \n\t"\
  859. PAVGB(%%mm7, %%mm3)\
  860. "pxor %%mm2, %%mm3 \n\t"\
  861. "psubb %%mm2, %%mm3 \n\t"\
  862. "psrlq $24, %%mm3 \n\t"\
  863. "movd " #c ", %%mm0 \n\t"\
  864. "movd 4" #c ", %%mm1 \n\t"\
  865. "punpckldq %%mm1, %%mm0 \n\t"\
  866. "paddb %%mm6, %%mm0 \n\t"\
  867. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  868. "paddb %%mm6, %%mm0 \n\t"\
  869. "movq %%mm0, " #c " \n\t"\
  870. "movd %%mm3, %%ecx \n\t"\
  871. "movd " #d ", %%mm0 \n\t"\
  872. "paddsb (%2, %%ecx, 8), %%mm4 \n\t"\
  873. "movd 4" #d ", %%mm1 \n\t"\
  874. "paddb %%mm6, %%mm4 \n\t"\
  875. "punpckldq %%mm1, %%mm0 \n\t"\
  876. "movq %%mm4, " #b " \n\t"\
  877. "paddb %%mm6, %%mm0 \n\t"\
  878. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  879. "paddb %%mm6, %%mm0 \n\t"\
  880. "movq %%mm0, " #d " \n\t"\
  881. HX1b((%0),(%%eax),(%%eax, %1),(%%eax, %1, 2))
  882. HX1b((%0, %1, 4),(%%ebx),(%%ebx, %1),(%%ebx, %1, 2))
  883. :
  884. : "r" (src), "r" (stride), "r" (lut)
  885. : "%eax", "%ebx", "%ecx"
  886. );
  887. #else
  888. //FIXME (has little in common with the mmx2 version)
  889. for(y=0; y<BLOCK_SIZE; y++)
  890. {
  891. int a= src[1] - src[2];
  892. int b= src[3] - src[4];
  893. int c= src[5] - src[6];
  894. int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
  895. if(d < QP)
  896. {
  897. int v = d * SIGN(-b);
  898. src[1] +=v/8;
  899. src[2] +=v/4;
  900. src[3] +=3*v/8;
  901. src[4] -=3*v/8;
  902. src[5] -=v/4;
  903. src[6] -=v/8;
  904. }
  905. src+=stride;
  906. }
  907. #endif
  908. }
/**
 * Default vertical deblocking filter.
 * Operates on the 8 lines below src (the MMX path advances src by one
 * stride first; the C path addresses them through the byte offsets l1..l8).
 * For each column it measures the "energy" 5*(L5-L4) + 2*(L3-L6) across the
 * block boundary; if |energy| < 8*QP the two lines next to the boundary
 * (l4/l5) are moved towards each other, with the correction limited by the
 * neighbouring energies and clipped to (L4-L5)/2 so the edge is only
 * smoothed, never over-corrected.
 * The MMX path uses the file-scope scratch qwords temp0..temp3 and the
 * constants w05 (5) and w20 (32), all declared elsewhere in this file.
 */
static inline void doVertDefFilter(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
	src+= stride;
	//FIXME try pmul for *5 stuff
//	src[0]=0;
	asm volatile(
		"pxor %%mm7, %%mm7				\n\t"
		"leal (%0, %1), %%eax				\n\t"
		"leal (%%eax, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7
//	%0	%0+%1	%0+2%1	eax+2%1	%0+4%1	eax+4%1	ebx+%1	ebx+2%1
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1
		// lines are unpacked to words: L* = low halves, H* = high halves
		"movq (%0), %%mm0				\n\t"
		"movq %%mm0, %%mm1				\n\t"
		"punpcklbw %%mm7, %%mm0				\n\t" // low part of line 0
		"punpckhbw %%mm7, %%mm1				\n\t" // high part of line 0
		"movq (%%eax), %%mm2				\n\t"
		"movq %%mm2, %%mm3				\n\t"
		"punpcklbw %%mm7, %%mm2				\n\t" // low part of line 1
		"punpckhbw %%mm7, %%mm3				\n\t" // high part of line 1
		"movq (%%eax, %1), %%mm4			\n\t"
		"movq %%mm4, %%mm5				\n\t"
		"punpcklbw %%mm7, %%mm4				\n\t" // low part of line 2
		"punpckhbw %%mm7, %%mm5				\n\t" // high part of line 2
		"paddw %%mm0, %%mm0				\n\t" // 2L0
		"paddw %%mm1, %%mm1				\n\t" // 2H0
		"psubw %%mm4, %%mm2				\n\t" // L1 - L2
		"psubw %%mm5, %%mm3				\n\t" // H1 - H2
		"psubw %%mm2, %%mm0				\n\t" // 2L0 - L1 + L2
		"psubw %%mm3, %%mm1				\n\t" // 2H0 - H1 + H2
		"psllw $2, %%mm2				\n\t" // 4L1 - 4L2
		"psllw $2, %%mm3				\n\t" // 4H1 - 4H2
		"psubw %%mm2, %%mm0				\n\t" // 2L0 - 5L1 + 5L2
		"psubw %%mm3, %%mm1				\n\t" // 2H0 - 5H1 + 5H2
		"movq (%%eax, %1, 2), %%mm2			\n\t"
		"movq %%mm2, %%mm3				\n\t"
		"punpcklbw %%mm7, %%mm2				\n\t" // L3
		"punpckhbw %%mm7, %%mm3				\n\t" // H3
		"psubw %%mm2, %%mm0				\n\t" // 2L0 - 5L1 + 5L2 - L3
		"psubw %%mm3, %%mm1				\n\t" // 2H0 - 5H1 + 5H2 - H3
		"psubw %%mm2, %%mm0				\n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"psubw %%mm3, %%mm1				\n\t" // 2H0 - 5H1 + 5H2 - 2H3
		// left energy parked in scratch memory, registers are scarce
		"movq %%mm0, temp0				\n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq %%mm1, temp1				\n\t" // 2H0 - 5H1 + 5H2 - 2H3
		"movq (%0, %1, 4), %%mm0			\n\t"
		"movq %%mm0, %%mm1				\n\t"
		"punpcklbw %%mm7, %%mm0				\n\t" // L4
		"punpckhbw %%mm7, %%mm1				\n\t" // H4
		"psubw %%mm0, %%mm2				\n\t" // L3 - L4
		"psubw %%mm1, %%mm3				\n\t" // H3 - H4
		"movq %%mm2, temp2				\n\t" // L3 - L4
		"movq %%mm3, temp3				\n\t" // H3 - H4
		"paddw %%mm4, %%mm4				\n\t" // 2L2
		"paddw %%mm5, %%mm5				\n\t" // 2H2
		"psubw %%mm2, %%mm4				\n\t" // 2L2 - L3 + L4
		"psubw %%mm3, %%mm5				\n\t" // 2H2 - H3 + H4
		"psllw $2, %%mm2				\n\t" // 4L3 - 4L4
		"psllw $2, %%mm3				\n\t" // 4H3 - 4H4
		"psubw %%mm2, %%mm4				\n\t" // 2L2 - 5L3 + 5L4
		"psubw %%mm3, %%mm5				\n\t" // 2H2 - 5H3 + 5H4
//50 opcodes so far
		"movq (%%ebx), %%mm2				\n\t"
		"movq %%mm2, %%mm3				\n\t"
		"punpcklbw %%mm7, %%mm2				\n\t" // L5
		"punpckhbw %%mm7, %%mm3				\n\t" // H5
		"psubw %%mm2, %%mm4				\n\t" // 2L2 - 5L3 + 5L4 - L5
		"psubw %%mm3, %%mm5				\n\t" // 2H2 - 5H3 + 5H4 - H5
		"psubw %%mm2, %%mm4				\n\t" // 2L2 - 5L3 + 5L4 - 2L5
		"psubw %%mm3, %%mm5				\n\t" // 2H2 - 5H3 + 5H4 - 2H5
		// line 6 is loaded twice: punpcklbw destroys the byte copy
		"movq (%%ebx, %1), %%mm6			\n\t"
		"punpcklbw %%mm7, %%mm6				\n\t" // L6
		"psubw %%mm6, %%mm2				\n\t" // L5 - L6
		"movq (%%ebx, %1), %%mm6			\n\t"
		"punpckhbw %%mm7, %%mm6				\n\t" // H6
		"psubw %%mm6, %%mm3				\n\t" // H5 - H6
		"paddw %%mm0, %%mm0				\n\t" // 2L4
		"paddw %%mm1, %%mm1				\n\t" // 2H4
		"psubw %%mm2, %%mm0				\n\t" // 2L4 - L5 + L6
		"psubw %%mm3, %%mm1				\n\t" // 2H4 - H5 + H6
		"psllw $2, %%mm2				\n\t" // 4L5 - 4L6
		"psllw $2, %%mm3				\n\t" // 4H5 - 4H6
		"psubw %%mm2, %%mm0				\n\t" // 2L4 - 5L5 + 5L6
		"psubw %%mm3, %%mm1				\n\t" // 2H4 - 5H5 + 5H6
		"movq (%%ebx, %1, 2), %%mm2			\n\t"
		"movq %%mm2, %%mm3				\n\t"
		"punpcklbw %%mm7, %%mm2				\n\t" // L7
		"punpckhbw %%mm7, %%mm3				\n\t" // H7
		"paddw %%mm2, %%mm2				\n\t" // 2L7
		"paddw %%mm3, %%mm3				\n\t" // 2H7
		"psubw %%mm2, %%mm0				\n\t" // 2L4 - 5L5 + 5L6 - 2L7
		"psubw %%mm3, %%mm1				\n\t" // 2H4 - 5H5 + 5H6 - 2H7
		"movq temp0, %%mm2				\n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq temp1, %%mm3				\n\t" // 2H0 - 5H1 + 5H2 - 2H3
//FIXME pxor, psubw, pmax for abs
		// abs() of the three energies via sign-mask xor/sub
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm0, %%mm6				\n\t"
		"pxor %%mm6, %%mm0				\n\t"
		"psubw %%mm6, %%mm0				\n\t" // |2L4 - 5L5 + 5L6 - 2L7|
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm1, %%mm6				\n\t"
		"pxor %%mm6, %%mm1				\n\t"
		"psubw %%mm6, %%mm1				\n\t" // |2H4 - 5H5 + 5H6 - 2H7|
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm2, %%mm6				\n\t"
		"pxor %%mm6, %%mm2				\n\t"
		"psubw %%mm6, %%mm2				\n\t" // |2L0 - 5L1 + 5L2 - 2L3|
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm3, %%mm6				\n\t"
		"pxor %%mm6, %%mm3				\n\t"
		"psubw %%mm6, %%mm3				\n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#ifdef HAVE_MMX2
		"pminsw %%mm2, %%mm0				\n\t"
		"pminsw %%mm3, %%mm1				\n\t"
#else
		// min(a,b) = a - max(a-b, 0) emulated with psubusw
		"movq %%mm0, %%mm6				\n\t"
		"psubusw %%mm2, %%mm6				\n\t"
		"psubw %%mm6, %%mm0				\n\t"
		"movq %%mm1, %%mm6				\n\t"
		"psubusw %%mm3, %%mm6				\n\t"
		"psubw %%mm6, %%mm1				\n\t"
#endif
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm4, %%mm6				\n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
		"pxor %%mm6, %%mm4				\n\t"
		"psubw %%mm6, %%mm4				\n\t" // |2L2 - 5L3 + 5L4 - 2L5|
		"pcmpgtw %%mm5, %%mm7				\n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
		"pxor %%mm7, %%mm5				\n\t"
		"psubw %%mm7, %%mm5				\n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes
		"movd %2, %%mm2					\n\t" // QP
//"pcmpeqb %%mm2, %%mm2\n\t"
		"punpcklwd %%mm2, %%mm2				\n\t"
		"punpcklwd %%mm2, %%mm2				\n\t"
		"psllw $3, %%mm2				\n\t" // 8QP
		"movq %%mm2, %%mm3				\n\t" // 8QP
		"pcmpgtw %%mm4, %%mm2				\n\t"
		"pcmpgtw %%mm5, %%mm3				\n\t"
		"pand %%mm2, %%mm4				\n\t"
		"pand %%mm3, %%mm5				\n\t"
		"psubusw %%mm0, %%mm4				\n\t" // hd
		"psubusw %%mm1, %%mm5				\n\t" // ld
		"movq w05, %%mm2				\n\t" // 5
		"pmullw %%mm2, %%mm4				\n\t"
		"pmullw %%mm2, %%mm5				\n\t"
		"movq w20, %%mm2				\n\t" // 32
		"paddw %%mm2, %%mm4				\n\t"
		"paddw %%mm2, %%mm5				\n\t"
		"psrlw $6, %%mm4				\n\t" // (5*hd + 32) >> 6
		"psrlw $6, %%mm5				\n\t" // (5*ld + 32) >> 6
/*
		"movq w06, %%mm2				\n\t" // 6
		"paddw %%mm2, %%mm4				\n\t"
		"paddw %%mm2, %%mm5				\n\t"
		"movq w1400, %%mm2				\n\t" // 1400h = 5120 = 5/64*2^16
//FIXME if *5/64 is supposed to be /13 then we should use 5041 instead of 5120
		"pmulhw %%mm2, %%mm4				\n\t" // hd/13
		"pmulhw %%mm2, %%mm5				\n\t" // ld/13
*/
		"movq temp2, %%mm0				\n\t" // L3 - L4
		"movq temp3, %%mm1				\n\t" // H3 - H4
		"pxor %%mm2, %%mm2				\n\t"
		"pxor %%mm3, %%mm3				\n\t"
		// FIXME rounding error
		"psraw $1, %%mm0				\n\t" // (L3 - L4)/2
		"psraw $1, %%mm1				\n\t" // (H3 - H4)/2
		"pcmpgtw %%mm0, %%mm2				\n\t" // sign (L3-L4)
		"pcmpgtw %%mm1, %%mm3				\n\t" // sign (H3-H4)
		"pxor %%mm2, %%mm0				\n\t"
		"pxor %%mm3, %%mm1				\n\t"
		"psubw %%mm2, %%mm0				\n\t" // |L3-L4|
		"psubw %%mm3, %%mm1				\n\t" // |H3-H4|
//		"psrlw $1, %%mm0				\n\t" // |L3 - L4|/2
//		"psrlw $1, %%mm1				\n\t" // |H3 - H4|/2
		"pxor %%mm6, %%mm2				\n\t"
		"pxor %%mm7, %%mm3				\n\t"
		"pand %%mm2, %%mm4				\n\t"
		"pand %%mm3, %%mm5				\n\t"
#ifdef HAVE_MMX2
		"pminsw %%mm0, %%mm4				\n\t"
		"pminsw %%mm1, %%mm5				\n\t"
#else
		"movq %%mm4, %%mm2				\n\t"
		"psubusw %%mm0, %%mm2				\n\t"
		"psubw %%mm2, %%mm4				\n\t"
		"movq %%mm5, %%mm2				\n\t"
		"psubusw %%mm1, %%mm2				\n\t"
		"psubw %%mm2, %%mm5				\n\t"
#endif
		// re-apply the sign and add/subtract the correction
		"pxor %%mm6, %%mm4				\n\t"
		"pxor %%mm7, %%mm5				\n\t"
		"psubw %%mm6, %%mm4				\n\t"
		"psubw %%mm7, %%mm5				\n\t"
		"packsswb %%mm5, %%mm4				\n\t"
		"movq (%%eax, %1, 2), %%mm0			\n\t"
		"paddb %%mm4, %%mm0				\n\t"
		"movq %%mm0, (%%eax, %1, 2)			\n\t"
		"movq (%0, %1, 4), %%mm0			\n\t"
		"psubb %%mm4, %%mm0				\n\t"
//		"pxor %%mm0, %%mm0				\n\t"
		"movq %%mm0, (%0, %1, 4)			\n\t"
		:
		: "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
	// l1..l8 are the byte offsets of lines 1..8 relative to src
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
//	const int l9= stride + l8;
	int x;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
		if(ABS(middleEnergy) < 8*QP)
		{
			const int q=(src[l4] - src[l5])/2;
			const int leftEnergy=  5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
			const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
			d= MAX(d, 0);
			d= (5*d + 32) >> 6; // ~= d*5/64, rounded
			d*= SIGN(-middleEnergy);
			// clip d to [0..q] resp. [q..0] so the step is only reduced
			if(q>0)
			{
				d= d<0 ? 0 : d;
				d= d>q ? q : d;
			}
			else
			{
				d= d>0 ? 0 : d;
				d= d<q ? q : d;
			}
			src[l4]-= d;
			src[l5]+= d;
		}
		src++;
	}
#endif
}
  1154. //FIXME? |255-0| = 1
/**
 * Check if the given 8x8 Block is mostly "flat" and copy the unaligned data into tempBlock.
 * Counts horizontally adjacent pixel pairs whose difference is within +/-1
 * (C path: ((a - b + 1) & 0xFFFF) < 3 accepts exactly diff in {-1,0,1});
 * while scanning, each line is copied into the file-scope tempBlock buffer.
 * Returns nonzero if the pair count exceeds hFlatnessThreshold.
 */
static inline int isHorizDCAndCopy2Temp(uint8_t src[], int stride)
{
//	src++;
	int numEq= 0;
#ifdef HAVE_MMX
	// NOTE(review): %1 (src) is advanced with addl and restored via push/pop
	// instead of being declared as an in/out operand — verify this is safe
	// with the compilers in use.
	asm volatile (
//		"int $3 \n\t"
		"pushl %1\n\t"
		"movq b7E, %%mm7				\n\t" // mm7 = 0x7F
		"movq b7C, %%mm6				\n\t" // mm6 = 0x7D
		// NOTE(review): the constant names (b7E/b7C) and the claimed values
		// (0x7F/0x7D) disagree by one — confirm against their definitions.
		"leal tempBlock, %%eax			\n\t"
		"pxor %%mm0, %%mm0				\n\t"

/* compare one (unaligned) line against its 1-pixel-shifted self,
   accumulate the per-byte comparison results in mm0 and store the
   line at offset i of tempBlock */
#define HDC_CHECK_AND_CPY(i) \
		"movq -4(%1), %%mm2				\n\t"\
		"psrlq $32, %%mm2				\n\t"\
		"punpckldq 4(%1), %%mm2				\n\t" /* (%1) */\
		"movq %%mm2, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"psubb %%mm1, %%mm2				\n\t"\
		"paddb %%mm7, %%mm2				\n\t"\
		"pcmpgtb %%mm6, %%mm2				\n\t"\
		"paddb %%mm2, %%mm0				\n\t"\
		"movq %%mm1," #i "(%%eax)			\n\t"

		HDC_CHECK_AND_CPY(0)
		"addl %2, %1					\n\t"
		HDC_CHECK_AND_CPY(8)
		"addl %2, %1					\n\t"
		HDC_CHECK_AND_CPY(16)
		"addl %2, %1					\n\t"
		HDC_CHECK_AND_CPY(24)
		"addl %2, %1					\n\t"
		HDC_CHECK_AND_CPY(32)
		"addl %2, %1					\n\t"
		HDC_CHECK_AND_CPY(40)
		"addl %2, %1					\n\t"
		HDC_CHECK_AND_CPY(48)
		"addl %2, %1					\n\t"
		HDC_CHECK_AND_CPY(56)
		// horizontal sum of the 8 byte counters in mm0
		"psllq $8, %%mm0				\n\t" // remove dummy value
		"movq %%mm0, %%mm1				\n\t"
		"psrlw $8, %%mm0				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"movq %%mm0, %%mm1				\n\t"
		"psrlq $16, %%mm0				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"movq %%mm0, %%mm1				\n\t"
		"psrlq $32, %%mm0				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"popl %1\n\t"
		"movd %%mm0, %0					\n\t"
		: "=r" (numEq)
		: "r" (src), "r" (stride)
		: "%eax"
		);
//	printf("%d\n", numEq);
	// pcmpgtb produced -1 per matching pair, so negate modulo 256
	numEq= (256 - (numEq & 0xFF)) &0xFF;
#else
	int y;
	for(y=0; y<BLOCK_SIZE; y++)
	{
		// diff in {-1,0,1}  <=>  (diff + 1) fits in [0,2]
		if(((src[0] - src[1] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[1] - src[2] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[2] - src[3] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[3] - src[4] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[4] - src[5] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[5] - src[6] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[6] - src[7] + 1) & 0xFFFF) < 3) numEq++;
		tempBlock[0 + y*TEMP_STRIDE] = src[0];
		tempBlock[1 + y*TEMP_STRIDE] = src[1];
		tempBlock[2 + y*TEMP_STRIDE] = src[2];
		tempBlock[3 + y*TEMP_STRIDE] = src[3];
		tempBlock[4 + y*TEMP_STRIDE] = src[4];
		tempBlock[5 + y*TEMP_STRIDE] = src[5];
		tempBlock[6 + y*TEMP_STRIDE] = src[6];
		tempBlock[7 + y*TEMP_STRIDE] = src[7];
		src+= stride;
	}
#endif
/*	if(abs(numEq - asmEq) > 0)
	{
//		printf("\nasm:%d c:%d\n", asmEq, numEq);
		for(int y=0; y<8; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", src[x + y*stride]);
			}
			printf("\n");
		}
	}
*/
//	printf("%d\n", numEq);
	return numEq > hFlatnessThreshold;
}
  1252. static inline int isHorizMinMaxOk(uint8_t src[], int stride, int QP)
  1253. {
  1254. #ifdef MMX_FIXME
  1255. FIXME
  1256. int isOk;
  1257. asm volatile(
  1258. // "int $3 \n\t"
  1259. "movq (%1, %2), %%mm0 \n\t"
  1260. "movq (%1, %2, 8), %%mm1 \n\t"
  1261. "movq %%mm0, %%mm2 \n\t"
  1262. "psubusb %%mm1, %%mm0 \n\t"
  1263. "psubusb %%mm2, %%mm1 \n\t"
  1264. "por %%mm1, %%mm0 \n\t" // ABS Diff
  1265. "movq pQPb, %%mm7 \n\t" // QP,..., QP
  1266. "paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
  1267. "psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
  1268. "pcmpeqd b00, %%mm0 \n\t"
  1269. "psrlq $16, %%mm0 \n\t"
  1270. "pcmpeqd bFF, %%mm0 \n\t"
  1271. // "movd %%mm0, (%1, %2, 4)\n\t"
  1272. "movd %%mm0, %0 \n\t"
  1273. : "=r" (isOk)
  1274. : "r" (src), "r" (stride)
  1275. );
  1276. return isOk;
  1277. #else
  1278. if(abs(src[0] - src[7]) > 2*QP) return 0;
  1279. return 1;
  1280. #endif
  1281. }
  1282. static inline void doHorizDefFilterAndCopyBack(uint8_t dst[], int stride, int QP)
  1283. {
  1284. #ifdef HAVE_MMX
  1285. asm volatile(
  1286. "pushl %0 \n\t"
  1287. "pxor %%mm7, %%mm7 \n\t"
  1288. "movq bm00001000, %%mm6 \n\t"
  1289. "movd %2, %%mm5 \n\t" // QP
  1290. "movq %%mm5, %%mm4 \n\t"
  1291. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  1292. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  1293. "psllq $24, %%mm4 \n\t"
  1294. "pxor %%mm5, %%mm5 \n\t" // 0
  1295. "psubb %%mm4, %%mm5 \n\t" // -QP
  1296. "leal tempBlock, %%eax \n\t"
  1297. //FIXME? "unroll by 2" and mix
  1298. #ifdef HAVE_MMX2
  1299. #define HDF(i) \
  1300. "movq " #i "(%%eax), %%mm0 \n\t"\
  1301. "movq %%mm0, %%mm1 \n\t"\
  1302. "movq %%mm0, %%mm2 \n\t"\
  1303. "psrlq $8, %%mm1 \n\t"\
  1304. "psubusb %%mm1, %%mm2 \n\t"\
  1305. "psubusb %%mm0, %%mm1 \n\t"\
  1306. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1307. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1308. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  1309. "pminub %%mm1, %%mm3 \n\t" /* p´5 = min(|p2-p1|, |p6-p5|)*/\
  1310. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1311. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5-p6|) */\
  1312. "paddb %%mm5, %%mm1 \n\t"\
  1313. "psubusb %%mm5, %%mm1 \n\t"\
  1314. "psrlw $2, %%mm1 \n\t"\
  1315. "pxor %%mm2, %%mm1 \n\t"\
  1316. "psubb %%mm2, %%mm1 \n\t"\
  1317. "pand %%mm6, %%mm1 \n\t"\
  1318. "psubb %%mm1, %%mm0 \n\t"\
  1319. "psllq $8, %%mm1 \n\t"\
  1320. "paddb %%mm1, %%mm0 \n\t"\
  1321. "movd %%mm0, (%0) \n\t"\
  1322. "psrlq $32, %%mm0 \n\t"\
  1323. "movd %%mm0, 4(%0) \n\t"
  1324. #else
  1325. #define HDF(i)\
  1326. "movq " #i "(%%eax), %%mm0 \n\t"\
  1327. "movq %%mm0, %%mm1 \n\t"\
  1328. "movq %%mm0, %%mm2 \n\t"\
  1329. "psrlq $8, %%mm1 \n\t"\
  1330. "psubusb %%mm1, %%mm2 \n\t"\
  1331. "psubusb %%mm0, %%mm1 \n\t"\
  1332. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1333. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1334. "movq %%mm1, %%mm3 \n\t"\
  1335. "psllq $32, %%mm3 \n\t"\
  1336. "movq %%mm3, %%mm4 \n\t"\
  1337. "psubusb %%mm1, %%mm4 \n\t"\
  1338. "psubb %%mm4, %%mm3 \n\t"\
  1339. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1340. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5,ü6|) */\
  1341. "paddb %%mm5, %%mm1 \n\t"\
  1342. "psubusb %%mm5, %%mm1 \n\t"\
  1343. "psrlw $2, %%mm1 \n\t"\
  1344. "pxor %%mm2, %%mm1 \n\t"\
  1345. "psubb %%mm2, %%mm1 \n\t"\
  1346. "pand %%mm6, %%mm1 \n\t"\
  1347. "psubb %%mm1, %%mm0 \n\t"\
  1348. "psllq $8, %%mm1 \n\t"\
  1349. "paddb %%mm1, %%mm0 \n\t"\
  1350. "movd %%mm0, (%0) \n\t"\
  1351. "psrlq $32, %%mm0 \n\t"\
  1352. "movd %%mm0, 4(%0) \n\t"
  1353. #endif
  1354. HDF(0)
  1355. "addl %1, %0 \n\t"
  1356. HDF(8)
  1357. "addl %1, %0 \n\t"
  1358. HDF(16)
  1359. "addl %1, %0 \n\t"
  1360. HDF(24)
  1361. "addl %1, %0 \n\t"
  1362. HDF(32)
  1363. "addl %1, %0 \n\t"
  1364. HDF(40)
  1365. "addl %1, %0 \n\t"
  1366. HDF(48)
  1367. "addl %1, %0 \n\t"
  1368. HDF(56)
  1369. "popl %0 \n\t"
  1370. :
  1371. : "r" (dst), "r" (stride), "r" (QP)
  1372. : "%eax"
  1373. );
  1374. #else
  1375. uint8_t *src= tempBlock;
  1376. int y;
  1377. for(y=0; y<BLOCK_SIZE; y++)
  1378. {
  1379. const int middleEnergy= 5*(src[4] - src[5]) + 2*(src[2] - src[5]);
  1380. dst[0] = src[0];
  1381. dst[1] = src[1];
  1382. dst[2] = src[2];
  1383. dst[3] = src[3];
  1384. dst[4] = src[4];
  1385. dst[5] = src[5];
  1386. dst[6] = src[6];
  1387. dst[7] = src[7];
  1388. if(ABS(middleEnergy) < 8*QP)
  1389. {
  1390. const int q=(src[3] - src[4])/2;
  1391. const int leftEnergy= 5*(src[2] - src[1]) + 2*(src[0] - src[3]);
  1392. const int rightEnergy= 5*(src[6] - src[5]) + 2*(src[4] - src[7]);
  1393. int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
  1394. d= MAX(d, 0);
  1395. d= (5*d + 32) >> 6;
  1396. d*= SIGN(-middleEnergy);
  1397. if(q>0)
  1398. {
  1399. d= d<0 ? 0 : d;
  1400. d= d>q ? q : d;
  1401. }
  1402. else
  1403. {
  1404. d= d>0 ? 0 : d;
  1405. d= d<q ? q : d;
  1406. }
  1407. dst[3]-= d;
  1408. dst[4]+= d;
  1409. }
  1410. dst+= stride;
  1411. src+= TEMP_STRIDE;
  1412. }
  1413. #endif
  1414. }
/**
 * Do a horizontal low pass filter on the 10x8 block (dst points to middle 8x8 Block)
 * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
 * using the 7-Tap Filter (2,2,2,4,2,2,2)/16 (MMX2/3DNOW version)
 * Reads the source pixels from the file-scope tempBlock buffer and writes
 * the filtered result back to dst; the C path also reads the boundary
 * pixels dst[-1] and dst[8], so those must be addressable.
 */
static inline void doHorizLowPassAndCopyBack(uint8_t dst[], int stride, int QP)
{
//return;
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	asm volatile(	//"movv %0 %1 %2\n\t"
		"pushl %0\n\t"
		"pxor %%mm7, %%mm7				\n\t"
		"leal tempBlock, %%eax				\n\t"
/*
#define HLP1	"movq (%0), %%mm0				\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"psllq $8, %%mm0				\n\t"\
		PAVGB(%%mm1, %%mm0)\
		"psrlw $8, %%mm0				\n\t"\
		"pxor %%mm1, %%mm1				\n\t"\
		"packuswb %%mm1, %%mm0				\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"movq %%mm0, %%mm2				\n\t"\
		"psllq $32, %%mm0				\n\t"\
		"paddb %%mm0, %%mm1				\n\t"\
		"psllq $16, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm0)\
		"movq %%mm0, %%mm3				\n\t"\
		"pand bm11001100, %%mm0				\n\t"\
		"paddusb %%mm0, %%mm3				\n\t"\
		"psrlq $8, %%mm3				\n\t"\
		PAVGB(%%mm1, %%mm4)\
		PAVGB(%%mm3, %%mm2)\
		"psrlq $16, %%mm2				\n\t"\
		"punpcklbw %%mm2, %%mm2				\n\t"\
		"movq %%mm2, (%0)				\n\t"\

#define HLP2	"movq (%0), %%mm0				\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"psllq $8, %%mm0				\n\t"\
		PAVGB(%%mm1, %%mm0)\
		"psrlw $8, %%mm0				\n\t"\
		"pxor %%mm1, %%mm1				\n\t"\
		"packuswb %%mm1, %%mm0				\n\t"\
		"movq %%mm0, %%mm2				\n\t"\
		"psllq $32, %%mm0				\n\t"\
		"psllq $16, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm0)\
		"movq %%mm0, %%mm3				\n\t"\
		"pand bm11001100, %%mm0				\n\t"\
		"paddusb %%mm0, %%mm3				\n\t"\
		"psrlq $8, %%mm3				\n\t"\
		PAVGB(%%mm3, %%mm2)\
		"psrlq $16, %%mm2				\n\t"\
		"punpcklbw %%mm2, %%mm2				\n\t"\
		"movq %%mm2, (%0)				\n\t"\
*/
// approximately a 7-Tap Filter with Vector (1,2,3,4,3,2,1)/16
/*
 Implemented	Exact 7-Tap
 9421		A321
 36421		64321
 334321		=
 1234321	=
 1234321	=
 123433		=
 12463		12346
 1249		123A

*/
#ifdef HAVE_MMX2
#define HLP3(i)	"movq " #i "(%%eax), %%mm0			\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"movq %%mm0, %%mm2				\n\t"\
		"movq %%mm0, %%mm3				\n\t"\
		"movq %%mm0, %%mm4				\n\t"\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"pand bm00000001, %%mm3				\n\t"\
		"pand bm10000000, %%mm4				\n\t"\
		"por %%mm3, %%mm1				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm1)\
		PAVGB(%%mm1, %%mm0)\
\
		"pshufw $0xF9, %%mm0, %%mm3			\n\t"\
		"pshufw $0x90, %%mm0, %%mm4			\n\t"\
		PAVGB(%%mm3, %%mm4)\
		PAVGB(%%mm4, %%mm0)\
		"movd %%mm0, (%0)				\n\t"\
		"psrlq $32, %%mm0				\n\t"\
		"movd %%mm0, 4(%0)				\n\t"
#else
#define HLP3(i)	"movq " #i "(%%eax), %%mm0			\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"movq %%mm0, %%mm2				\n\t"\
		"movd -4(%0), %%mm3				\n\t" /*0001000*/\
		"movd 8(%0), %%mm4				\n\t" /*0001000*/\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"psrlq $24, %%mm3				\n\t"\
		"psllq $56, %%mm4				\n\t"\
		"por %%mm3, %%mm1				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm1)\
		PAVGB(%%mm1, %%mm0)\
\
		"movq %%mm0, %%mm3				\n\t"\
		"movq %%mm0, %%mm4				\n\t"\
		"movq %%mm0, %%mm5				\n\t"\
		"psrlq $16, %%mm3				\n\t"\
		"psllq $16, %%mm4				\n\t"\
		"pand bm11000000, %%mm5				\n\t"\
		"por %%mm5, %%mm3				\n\t"\
		"movq %%mm0, %%mm5				\n\t"\
		"pand bm00000011, %%mm5				\n\t"\
		"por %%mm5, %%mm4				\n\t"\
		PAVGB(%%mm3, %%mm4)\
		PAVGB(%%mm4, %%mm0)\
		"movd %%mm0, (%0)				\n\t"\
		"psrlq $32, %%mm0				\n\t"\
		"movd %%mm0, 4(%0)				\n\t"
#endif
/* uses the 7-Tap Filter: 1112111 */
#define NEW_HLP(i)\
		"movq " #i "(%%eax), %%mm0			\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"movq %%mm0, %%mm2				\n\t"\
		"movd -4(%0), %%mm3				\n\t" /*0001000*/\
		"movd 8(%0), %%mm4				\n\t" /*0001000*/\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"psrlq $24, %%mm3				\n\t"\
		"psllq $56, %%mm4				\n\t"\
		"por %%mm3, %%mm1				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		"movq %%mm1, %%mm5				\n\t"\
		PAVGB(%%mm2, %%mm1)\
		PAVGB(%%mm1, %%mm0)\
		"psllq $8, %%mm5				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm5				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		"movq %%mm5, %%mm1				\n\t"\
		PAVGB(%%mm2, %%mm5)\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm1				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm1)\
		PAVGB(%%mm1, %%mm5)\
		PAVGB(%%mm5, %%mm0)\
		"movd %%mm0, (%0)				\n\t"\
		"psrlq $32, %%mm0				\n\t"\
		"movd %%mm0, 4(%0)				\n\t"

/* uses the 9-Tap Filter: 112242211 */
#define NEW_HLP2(i)\
		"movq " #i "(%%eax), %%mm0			\n\t" /*0001000*/\
		"movq %%mm0, %%mm1				\n\t" /*0001000*/\
		"movq %%mm0, %%mm2				\n\t" /*0001000*/\
		"movd -4(%0), %%mm3				\n\t" /*0001000*/\
		"movd 8(%0), %%mm4				\n\t" /*0001000*/\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"psrlq $24, %%mm3				\n\t"\
		"psllq $56, %%mm4				\n\t"\
		"por %%mm3, %%mm1				\n\t" /*0010000*/\
		"por %%mm4, %%mm2				\n\t" /*0000100*/\
		"movq %%mm1, %%mm5				\n\t" /*0010000*/\
		PAVGB(%%mm2, %%mm1)				/*0010100*/\
		PAVGB(%%mm1, %%mm0)				/*0012100*/\
		"psllq $8, %%mm5				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm5				\n\t" /*0100000*/\
		"por %%mm4, %%mm2				\n\t" /*0000010*/\
		"movq %%mm5, %%mm1				\n\t" /*0100000*/\
		PAVGB(%%mm2, %%mm5)				/*0100010*/\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm1				\n\t" /*1000000*/\
		"por %%mm4, %%mm2				\n\t" /*0000001*/\
		"movq %%mm1, %%mm6				\n\t" /*1000000*/\
		PAVGB(%%mm2, %%mm1)				/*1000001*/\
		"psllq $8, %%mm6				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm6				\n\t"/*100000000*/\
		"por %%mm4, %%mm2				\n\t"/*000000001*/\
		PAVGB(%%mm2, %%mm6)				/*100000001*/\
		PAVGB(%%mm6, %%mm1)				/*110000011*/\
		PAVGB(%%mm1, %%mm5)				/*112000211*/\
		PAVGB(%%mm5, %%mm0)				/*112242211*/\
		"movd %%mm0, (%0)				\n\t"\
		"psrlq $32, %%mm0				\n\t"\
		"movd %%mm0, 4(%0)				\n\t"

// the variant actually used
#define HLP(i) NEW_HLP(i)

		HLP(0)
		"addl %1, %0					\n\t"
		HLP(8)
		"addl %1, %0					\n\t"
		HLP(16)
		"addl %1, %0					\n\t"
		HLP(24)
		"addl %1, %0					\n\t"
		HLP(32)
		"addl %1, %0					\n\t"
		HLP(40)
		"addl %1, %0					\n\t"
		HLP(48)
		"addl %1, %0					\n\t"
		HLP(56)

		"popl %0\n\t"
		:
		: "r" (dst), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	uint8_t *temp= tempBlock;
	int y;
	for(y=0; y<BLOCK_SIZE; y++)
	{
		// boundary pixels only count if they are close to the block edge
		const int first= ABS(dst[-1] - dst[0]) < QP ? dst[-1] : dst[0];
		const int last= ABS(dst[8] - dst[7]) < QP ? dst[8] : dst[7];

		// sums[i] = temp[i-1] + temp[i] with the boundary values spliced in
		int sums[9];
		sums[0] = first + temp[0];
		sums[1] = temp[0] + temp[1];
		sums[2] = temp[1] + temp[2];
		sums[3] = temp[2] + temp[3];
		sums[4] = temp[3] + temp[4];
		sums[5] = temp[4] + temp[5];
		sums[6] = temp[5] + temp[6];
		sums[7] = temp[6] + temp[7];
		sums[8] = temp[7] + last;

		/* NB: `<<` binds looser than `+`, so e.g. (first + sums[0] + sums[3]<<1)
		 * parses as ((first + sums[0] + sums[3]) << 1) — the whole sum is
		 * doubled. That grouping is what the (1,1,2,2,4,2,2,1,1)/16 tap
		 * weights require; do not "fix" the parentheses. */
		dst[0]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
		dst[1]= ((dst[1]<<2) + (first + sums[0] + sums[3]<<1) + sums[5] + 8)>>4;
		dst[2]= ((dst[2]<<2) + (first + sums[1] + sums[4]<<1) + sums[6] + 8)>>4;
		dst[3]= ((dst[3]<<2) + (sums[2] + sums[5]<<1) + sums[0] + sums[7] + 8)>>4;
		dst[4]= ((dst[4]<<2) + (sums[3] + sums[6]<<1) + sums[1] + sums[8] + 8)>>4;
		dst[5]= ((dst[5]<<2) + (last + sums[7] + sums[4]<<1) + sums[2] + 8)>>4;
		dst[6]= ((last + dst[6]<<2) + (dst[7] + sums[5]<<1) + sums[3] + 8)>>4;
		dst[7]= ((sums[8]<<2) + (last + sums[6]<<1) + sums[4] + 8)>>4;

		dst+= stride;
		temp+= TEMP_STRIDE;
	}
#endif
}
/**
 * Dering filter — UNFINISHED. The MMX2 sketch below is compiled out
 * (guard is HAVE_MMX2X, not HAVE_MMX2) and there is no C implementation,
 * so this function is currently a no-op. The sketch only computes the
 * per-block min/max pixel values and their average; the actual deringing
 * step was never written.
 */
static inline void dering(uint8_t src[], int stride, int QP)
{
//FIXME
#ifdef HAVE_MMX2X
	asm volatile(
		"leal (%0, %1), %%eax				\n\t"
		"leal (%%eax, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		"pcmpeq %%mm6, %%mm6				\n\t"
		"pxor %%mm7, %%mm7				\n\t"

// NOTE(review): stray comma after %%mm0 in the movq below; would not
// assemble if this block were ever enabled.
#define FIND_MIN_MAX(addr)\
		"movq (" #addr "), %%mm0,			\n\t"\
		"pminub %%mm0, %%mm6				\n\t"\
		"pmaxub %%mm0, %%mm7				\n\t"

FIND_MIN_MAX(%0)
FIND_MIN_MAX(%%eax)
FIND_MIN_MAX(%%eax, %1)
FIND_MIN_MAX(%%eax, %1, 2)
FIND_MIN_MAX(%0, %1, 4)
FIND_MIN_MAX(%%ebx)
FIND_MIN_MAX(%%ebx, %1)
FIND_MIN_MAX(%%ebx, %1, 2)
FIND_MIN_MAX(%0, %1, 8)
// NOTE(review): duplicate of the (%%ebx, %1, 2) line above — per the
// address map, line 9 should be (%%ebx, %1, 4). Verify before enabling.
FIND_MIN_MAX(%%ebx, %1, 2)

		// horizontal byte-wise reduction of mm6/mm7
		"movq %%mm6, %%mm4				\n\t"
		"psrlq $32, %%mm6				\n\t"
		"pminub %%mm4, %%mm6				\n\t"
		"movq %%mm6, %%mm4				\n\t"
		"psrlq $16, %%mm6				\n\t"
		"pminub %%mm4, %%mm6				\n\t"
		"movq %%mm6, %%mm4				\n\t"
		"psrlq $8, %%mm6				\n\t"
		"pminub %%mm4, %%mm6				\n\t" // min of pixels
		"movq %%mm7, %%mm4				\n\t"
		"psrlq $32, %%mm7				\n\t"
		"pmaxub %%mm4, %%mm7				\n\t"
		"movq %%mm7, %%mm4				\n\t"
		"psrlq $16, %%mm7				\n\t"
		"pmaxub %%mm4, %%mm7				\n\t"
		"movq %%mm7, %%mm4				\n\t"
		"psrlq $8, %%mm7				\n\t"
		"pmaxub %%mm4, %%mm7				\n\t" // max of pixels
		PAVGB(%%mm6, %%mm7)				// (max + min)/2

		: : "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
//FIXME
#endif
}
  1709. /**
  1710. * Deinterlaces the given block
  1711. * will be called for every 8x8 block, except the last row, and can read & write into an 8x16 block
  1712. */
  1713. static inline void deInterlaceInterpolateLinear(uint8_t src[], int stride)
  1714. {
  1715. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1716. asm volatile(
  1717. "leal (%0, %1), %%eax \n\t"
  1718. "leal (%%eax, %1, 4), %%ebx \n\t"
  1719. // 0 1 2 3 4 5 6 7 8 9
  1720. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1721. "movq (%0), %%mm0 \n\t"
  1722. "movq (%%eax, %1), %%mm1 \n\t"
  1723. PAVGB(%%mm1, %%mm0)\
  1724. "movq %%mm0, (%%eax) \n\t"
  1725. "movq (%0, %1, 4), %%mm0 \n\t"
  1726. PAVGB(%%mm0, %%mm1)\
  1727. "movq %%mm1, (%%eax, %1, 2) \n\t"
  1728. "movq (%%ebx, %1), %%mm1 \n\t"
  1729. PAVGB(%%mm1, %%mm0)\
  1730. "movq %%mm0, (%%ebx) \n\t"
  1731. "movq (%0, %1, 8), %%mm0 \n\t"
  1732. PAVGB(%%mm0, %%mm1)\
  1733. "movq %%mm1, (%%ebx, %1, 2) \n\t"
  1734. : : "r" (src), "r" (stride)
  1735. : "%eax", "%ebx"
  1736. );
  1737. #else
  1738. int x;
  1739. for(x=0; x<8; x++)
  1740. {
  1741. src[stride] = (src[0] + src[stride*2])>>1;
  1742. src[stride*3] = (src[stride*2] + src[stride*4])>>1;
  1743. src[stride*5] = (src[stride*4] + src[stride*6])>>1;
  1744. src[stride*7] = (src[stride*6] + src[stride*8])>>1;
  1745. src++;
  1746. }
  1747. #endif
  1748. }
  1749. /**
  1750. * Deinterlaces the given block
  1751. * will be called for every 8x8 block, in the last row, and can read & write into an 8x8 block
  1752. */
  1753. static inline void deInterlaceInterpolateLinearLastRow(uint8_t src[], int stride)
  1754. {
  1755. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1756. asm volatile(
  1757. "leal (%0, %1), %%eax \n\t"
  1758. "leal (%%eax, %1, 4), %%ebx \n\t"
  1759. // 0 1 2 3 4 5 6 7 8 9
  1760. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1761. "movq (%0), %%mm0 \n\t"
  1762. "movq (%%eax, %1), %%mm1 \n\t"
  1763. PAVGB(%%mm1, %%mm0)\
  1764. "movq %%mm0, (%%eax) \n\t"
  1765. "movq (%0, %1, 4), %%mm0 \n\t"
  1766. PAVGB(%%mm0, %%mm1)\
  1767. "movq %%mm1, (%%eax, %1, 2) \n\t"
  1768. "movq (%%ebx, %1), %%mm1 \n\t"
  1769. PAVGB(%%mm1, %%mm0)\
  1770. "movq %%mm0, (%%ebx) \n\t"
  1771. "movq %%mm1, (%%ebx, %1, 2) \n\t"
  1772. : : "r" (src), "r" (stride)
  1773. : "%eax", "%ebx"
  1774. );
  1775. #else
  1776. int x;
  1777. for(x=0; x<8; x++)
  1778. {
  1779. src[stride] = (src[0] + src[stride*2])>>1;
  1780. src[stride*3] = (src[stride*2] + src[stride*4])>>1;
  1781. src[stride*5] = (src[stride*4] + src[stride*6])>>1;
  1782. src[stride*7] = src[stride*6];
  1783. src++;
  1784. }
  1785. #endif
  1786. }
/**
 * Deinterlaces the given block with a vertical (1 2 1)/4 blend.
 * will be called for every 8x8 block, except the last row, and can read & write into an 8x16 block
 * will shift the image up by 1 line (FIXME if this is a problem)
 * Each output row i becomes (L(i) + 2*L(i+1) + L(i+2))/4, built from two
 * cascaded byte averages (PAVGB) in the SIMD path.
 */
static inline void deInterlaceBlendLinear(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
// pointer map for the rows of the block:
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1

		"movq (%0), %%mm0 \n\t"			// L0
		"movq (%%eax, %1), %%mm1 \n\t"		// L2
		PAVGB(%%mm1, %%mm0)			// L0+L2
		"movq (%%eax), %%mm2 \n\t"		// L1
		PAVGB(%%mm2, %%mm0)
		"movq %%mm0, (%0) \n\t"
		"movq (%%eax, %1, 2), %%mm0 \n\t"	// L3
		PAVGB(%%mm0, %%mm2)			// L1+L3
		PAVGB(%%mm1, %%mm2)			// 2L2 + L1 + L3
		"movq %%mm2, (%%eax) \n\t"
		"movq (%0, %1, 4), %%mm2 \n\t"		// L4
		PAVGB(%%mm2, %%mm1)			// L2+L4
		PAVGB(%%mm0, %%mm1)			// 2L3 + L2 + L4
		"movq %%mm1, (%%eax, %1) \n\t"
		"movq (%%ebx), %%mm1 \n\t"		// L5
		PAVGB(%%mm1, %%mm0)			// L3+L5
		PAVGB(%%mm2, %%mm0)			// 2L4 + L3 + L5
		"movq %%mm0, (%%eax, %1, 2) \n\t"
		"movq (%%ebx, %1), %%mm0 \n\t"		// L6
		PAVGB(%%mm0, %%mm2)			// L4+L6
		PAVGB(%%mm1, %%mm2)			// 2L5 + L4 + L6
		"movq %%mm2, (%0, %1, 4) \n\t"
		"movq (%%ebx, %1, 2), %%mm2 \n\t"	// L7
		PAVGB(%%mm2, %%mm1)			// L5+L7
		PAVGB(%%mm0, %%mm1)			// 2L6 + L5 + L7
		"movq %%mm1, (%%ebx) \n\t"
		"movq (%0, %1, 8), %%mm1 \n\t"		// L8
		PAVGB(%%mm1, %%mm0)			// L6+L8
		PAVGB(%%mm2, %%mm0)			// 2L7 + L6 + L8
		"movq %%mm0, (%%ebx, %1) \n\t"
		"movq (%%ebx, %1, 4), %%mm0 \n\t"	// L9
		PAVGB(%%mm0, %%mm2)			// L7+L9
		PAVGB(%%mm1, %%mm2)			// 2L8 + L7 + L9
		"movq %%mm2, (%%ebx, %1, 2) \n\t"
		: : "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	int x;
	for(x=0; x<8; x++)
	{
		// (1 2 1)/4 vertical blend, shifting the block up by one line;
		// each row is written before any row it reads is modified
		src[0       ] = (src[0       ] + 2*src[stride  ] + src[stride*2])>>2;
		src[stride  ] = (src[stride  ] + 2*src[stride*2] + src[stride*3])>>2;
		src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
		src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
		src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
		src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
		src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
		src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
		src++;
	}
#endif
}
  1853. /**
  1854. * Deinterlaces the given block
  1855. * will be called for every 8x8 block, in the last row, and can read & write into an 8x8 block
  1856. * will shift the image up by 1 line (FIXME if this is a problem)
  1857. */
  1858. static inline void deInterlaceBlendLinearLastRow(uint8_t src[], int stride)
  1859. {
  1860. #if defined (HAVE_MMSX2) || defined (HAVE_3DNOW)
  1861. asm volatile(
  1862. "leal (%0, %1), %%eax \n\t"
  1863. "leal (%%eax, %1, 4), %%ebx \n\t"
  1864. // 0 1 2 3 4 5 6 7 8 9
  1865. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1866. "movq (%0), %%mm0 \n\t" // L0
  1867. "movq (%%eax, %1), %%mm1 \n\t" // L2
  1868. PAVGB(%%mm1, %%mm0) // L0+L2
  1869. "movq (%%eax), %%mm2 \n\t" // L1
  1870. PAVGB(%%mm2, %%mm0)
  1871. "movq %%mm0, (%0) \n\t"
  1872. "movq (%%eax, %1, 2), %%mm0 \n\t" // L3
  1873. PAVGB(%%mm0, %%mm2) // L1+L3
  1874. PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
  1875. "movq %%mm2, (%%eax) \n\t"
  1876. "movq (%0, %1, 4), %%mm2 \n\t" // L4
  1877. PAVGB(%%mm2, %%mm1) // L2+L4
  1878. PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
  1879. "movq %%mm1, (%%eax, %1) \n\t"
  1880. "movq (%%ebx), %%mm1 \n\t" // L5
  1881. PAVGB(%%mm1, %%mm0) // L3+L5
  1882. PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
  1883. "movq %%mm0, (%%eax, %1, 2) \n\t"
  1884. "movq (%%ebx, %1), %%mm0 \n\t" // L6
  1885. PAVGB(%%mm0, %%mm2) // L4+L6
  1886. PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
  1887. "movq %%mm2, (%0, %1, 4) \n\t"
  1888. "movq (%%ebx, %1, 2), %%mm2 \n\t" // L7
  1889. PAVGB(%%mm2, %%mm1) // L5+L7
  1890. PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
  1891. "movq %%mm1, (%%ebx) \n\t"
  1892. PAVGB(%%mm2, %%mm0) // L7 + L8
  1893. "movq %%mm0, (%%ebx, %1) \n\t"
  1894. "movq %%mm0, (%%ebx, %1, 2) \n\t"
  1895. : : "r" (src), "r" (stride)
  1896. : "%eax", "%ebx"
  1897. );
  1898. #else
  1899. int x;
  1900. for(x=0; x<8; x++)
  1901. {
  1902. src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
  1903. src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
  1904. src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
  1905. src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
  1906. src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
  1907. src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
  1908. src[stride*6] = (src[stride*6] + src[stride*7])>>1;
  1909. src[stride*7] = src[stride*6];
  1910. src++;
  1911. }
  1912. #endif
  1913. }
/**
 * Deinterlaces the given block with a vertical 3-tap median.
 * will be called for every 8x8 block, except the last row, and can read & write into an 8x16 block
 * MMX2 path: each odd row is replaced by the bytewise median of itself and its
 * two even neighbours (via pmaxub/pminub). The plain-MMX path computes the same
 * median with saturated subtracts and compares. The C fallback is NOT a median
 * but the (1 2 1)/4 linear blend — see the FIXME below.
 */
static inline void deInterlaceMedian(uint8_t src[], int stride)
{
#ifdef HAVE_MMX
#ifdef HAVE_MMX2
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
// pointer map for the rows of the block:
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1

		"movq (%0), %%mm0 \n\t"			// L0
		"movq (%%eax, %1), %%mm2 \n\t"		// L2
		"movq (%%eax), %%mm1 \n\t"		// L1
		"movq %%mm0, %%mm3 \n\t"
		"pmaxub %%mm1, %%mm0 \n\t"
		"pminub %%mm3, %%mm1 \n\t"
		"pmaxub %%mm2, %%mm1 \n\t"
		"pminub %%mm1, %%mm0 \n\t"		// mm0= median(L0,L1,L2)
		"movq %%mm0, (%%eax) \n\t"		// -> row 1
		"movq (%0, %1, 4), %%mm0 \n\t"		// L4
		"movq (%%eax, %1, 2), %%mm1 \n\t"	// L3
		"movq %%mm2, %%mm3 \n\t"
		"pmaxub %%mm1, %%mm2 \n\t"
		"pminub %%mm3, %%mm1 \n\t"
		"pmaxub %%mm0, %%mm1 \n\t"
		"pminub %%mm1, %%mm2 \n\t"		// mm2= median(L2,L3,L4)
		"movq %%mm2, (%%eax, %1, 2) \n\t"	// -> row 3
		"movq (%%ebx), %%mm2 \n\t"		// L5
		"movq (%%ebx, %1), %%mm1 \n\t"		// L6
		"movq %%mm2, %%mm3 \n\t"
		"pmaxub %%mm0, %%mm2 \n\t"
		"pminub %%mm3, %%mm0 \n\t"
		"pmaxub %%mm1, %%mm0 \n\t"
		"pminub %%mm0, %%mm2 \n\t"		// mm2= median(L4,L5,L6)
		"movq %%mm2, (%%ebx) \n\t"		// -> row 5
		"movq (%%ebx, %1, 2), %%mm2 \n\t"	// L7
		"movq (%0, %1, 8), %%mm0 \n\t"		// L8
		"movq %%mm2, %%mm3 \n\t"
		"pmaxub %%mm0, %%mm2 \n\t"
		"pminub %%mm3, %%mm0 \n\t"
		"pmaxub %%mm1, %%mm0 \n\t"
		"pminub %%mm0, %%mm2 \n\t"		// mm2= median(L6,L7,L8)
		"movq %%mm2, (%%ebx, %1, 2) \n\t"	// -> row 7
		: : "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);

#else // MMX without MMX2
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
// pointer map for the rows of the block:
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1

		"pxor %%mm7, %%mm7 \n\t"

// bytewise median of a,b,c written into b, without pmaxub/pminub:
// the saturated subtracts + pcmpeqb build <= masks, the xor/por/pand
// combination selects the value that is neither min nor max
// (this macro is reused by deInterlaceMedianLastRow() below)
#define MEDIAN(a,b,c)\
	"movq " #a ", %%mm0 \n\t"\
	"movq " #b ", %%mm2 \n\t"\
	"movq " #c ", %%mm1 \n\t"\
	"movq %%mm0, %%mm3 \n\t"\
	"movq %%mm1, %%mm4 \n\t"\
	"movq %%mm2, %%mm5 \n\t"\
	"psubusb %%mm1, %%mm3 \n\t"\
	"psubusb %%mm2, %%mm4 \n\t"\
	"psubusb %%mm0, %%mm5 \n\t"\
	"pcmpeqb %%mm7, %%mm3 \n\t"\
	"pcmpeqb %%mm7, %%mm4 \n\t"\
	"pcmpeqb %%mm7, %%mm5 \n\t"\
	"movq %%mm3, %%mm6 \n\t"\
	"pxor %%mm4, %%mm3 \n\t"\
	"pxor %%mm5, %%mm4 \n\t"\
	"pxor %%mm6, %%mm5 \n\t"\
	"por %%mm3, %%mm1 \n\t"\
	"por %%mm4, %%mm2 \n\t"\
	"por %%mm5, %%mm0 \n\t"\
	"pand %%mm2, %%mm0 \n\t"\
	"pand %%mm1, %%mm0 \n\t"\
	"movq %%mm0, " #b " \n\t"

		MEDIAN((%0), (%%eax), (%%eax, %1))
		MEDIAN((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4))
		MEDIAN((%0, %1, 4), (%%ebx), (%%ebx, %1))
		MEDIAN((%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8))
		: : "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#endif // MMX
#else
	//FIXME
	// C fallback is a linear blend, not a median (shifts the image up 1 line)
	int x;
	for(x=0; x<8; x++)
	{
		src[0       ] = (src[0       ] + 2*src[stride  ] + src[stride*2])>>2;
		src[stride  ] = (src[stride  ] + 2*src[stride*2] + src[stride*3])>>2;
		src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
		src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
		src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
		src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
		src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
		src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
		src++;
	}
#endif
}
/**
 * Deinterlaces the given block with a vertical 3-tap median.
 * will be called for every 8x8 block, in the last row, and can read & write into an 8x8 block
 * Same as deInterlaceMedian() but must not read below row 7: the last median
 * group is replaced by a copy of row 6 into row 7. The plain-MMX path reuses
 * the MEDIAN() macro defined in deInterlaceMedian(). The C fallback is NOT a
 * median but the (1 2 1)/4 linear blend — see the FIXME below.
 */
static inline void deInterlaceMedianLastRow(uint8_t src[], int stride)
{
#ifdef HAVE_MMX
#ifdef HAVE_MMX2
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
// pointer map for the rows of the block:
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1

		"movq (%0), %%mm0 \n\t"			// L0
		"movq (%%eax, %1), %%mm2 \n\t"		// L2
		"movq (%%eax), %%mm1 \n\t"		// L1
		"movq %%mm0, %%mm3 \n\t"
		"pmaxub %%mm1, %%mm0 \n\t"
		"pminub %%mm3, %%mm1 \n\t"
		"pmaxub %%mm2, %%mm1 \n\t"
		"pminub %%mm1, %%mm0 \n\t"		// mm0= median(L0,L1,L2)
		"movq %%mm0, (%%eax) \n\t"		// -> row 1
		"movq (%0, %1, 4), %%mm0 \n\t"		// L4
		"movq (%%eax, %1, 2), %%mm1 \n\t"	// L3
		"movq %%mm2, %%mm3 \n\t"
		"pmaxub %%mm1, %%mm2 \n\t"
		"pminub %%mm3, %%mm1 \n\t"
		"pmaxub %%mm0, %%mm1 \n\t"
		"pminub %%mm1, %%mm2 \n\t"		// mm2= median(L2,L3,L4)
		"movq %%mm2, (%%eax, %1, 2) \n\t"	// -> row 3
		"movq (%%ebx), %%mm2 \n\t"		// L5
		"movq (%%ebx, %1), %%mm1 \n\t"		// L6
		"movq %%mm2, %%mm3 \n\t"
		"pmaxub %%mm0, %%mm2 \n\t"
		"pminub %%mm3, %%mm0 \n\t"
		"pmaxub %%mm1, %%mm0 \n\t"
		"pminub %%mm0, %%mm2 \n\t"		// mm2= median(L4,L5,L6)
		"movq %%mm2, (%%ebx) \n\t"		// -> row 5
		"movq %%mm1, (%%ebx, %1, 2) \n\t"	// row 7 = L6 (no row below)
		: : "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);

#else //MMX & no MMX2
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
// pointer map for the rows of the block:
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1

		"pxor %%mm7, %%mm7 \n\t"
		MEDIAN((%0), (%%eax), (%%eax, %1))
		MEDIAN((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4))
		MEDIAN((%0, %1, 4), (%%ebx), (%%ebx, %1))
		"movq (%%ebx, %1), %%mm0 \n\t"		// row 7 = row 6 (no row below)
		"movq %%mm0, (%%ebx, %1, 2) \n\t"
		: : "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#endif //MMX
#else
	//FIXME
	// C fallback is a linear blend, not a median (shifts the image up 1 line)
	int x;
	for(x=0; x<8; x++)
	{
		src[0       ] = (src[0       ] + 2*src[stride  ] + src[stride*2])>>2;
		src[stride  ] = (src[stride  ] + 2*src[stride*2] + src[stride*3])>>2;
		src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
		src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
		src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
		src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
		src[stride*6] = (src[stride*6] + src[stride*7])>>1;
		src[stride*7] = src[stride*6];
		src++;
	}
#endif
}
  2093. #ifdef HAVE_ODIVX_POSTPROCESS
  2094. #include "../opendivx/postprocess.h"
  2095. int use_old_pp=0;
  2096. #endif
  2097. static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
  2098. QP_STORE_T QPs[], int QPStride, int isColor, int mode);
  2099. /**
  2100. * ...
  2101. */
  2102. void postprocess(unsigned char * src[], int src_stride,
  2103. unsigned char * dst[], int dst_stride,
  2104. int horizontal_size, int vertical_size,
  2105. QP_STORE_T *QP_store, int QP_stride,
  2106. int mode)
  2107. {
  2108. #ifdef HAVE_ODIVX_POSTPROCESS
  2109. // Note: I could make this shit outside of this file, but it would mean one
  2110. // more function call...
  2111. if(use_old_pp){
  2112. odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,mode);
  2113. return;
  2114. }
  2115. #endif
  2116. /*
  2117. long long T= rdtsc();
  2118. for(int y=vertical_size-1; y>=0 ; y--)
  2119. memcpy(dst[0] + y*src_stride, src[0] + y*src_stride,src_stride);
  2120. // memcpy(dst[0], src[0],src_stride*vertical_size);
  2121. printf("%4dk\r", (rdtsc()-T)/1000);
  2122. return;
  2123. */
  2124. /*
  2125. long long T= rdtsc();
  2126. while( (rdtsc() - T)/1000 < 4000);
  2127. return;
  2128. */
  2129. postProcess(src[0], src_stride, dst[0], dst_stride,
  2130. horizontal_size, vertical_size, QP_store, QP_stride, 0, mode);
  2131. horizontal_size >>= 1;
  2132. vertical_size >>= 1;
  2133. src_stride >>= 1;
  2134. dst_stride >>= 1;
  2135. mode= ((mode&0xFF)>>4) | (mode&0xFFFFFF00);
  2136. if(1)
  2137. {
  2138. postProcess(src[1], src_stride, dst[1], dst_stride,
  2139. horizontal_size, vertical_size, QP_store, QP_stride, 1, mode);
  2140. postProcess(src[2], src_stride, dst[2], dst_stride,
  2141. horizontal_size, vertical_size, QP_store, QP_stride, 1, mode);
  2142. }
  2143. else
  2144. {
  2145. memcpy(dst[1], src[1], src_stride*horizontal_size);
  2146. memcpy(dst[2], src[2], src_stride*horizontal_size);
  2147. }
  2148. }
  2149. /**
  2150. * gets the mode flags for a given quality (larger values mean slower but better postprocessing)
  2151. * 0 <= quality <= 6
  2152. */
  2153. int getPpModeForQuality(int quality){
  2154. int modes[1+GET_PP_QUALITY_MAX]= {
  2155. 0,
  2156. #if 1
  2157. // horizontal filters first
  2158. LUM_H_DEBLOCK,
  2159. LUM_H_DEBLOCK | LUM_V_DEBLOCK,
  2160. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2161. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2162. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING,
  2163. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING | CHROM_DERING
  2164. #else
  2165. // vertical filters first
  2166. LUM_V_DEBLOCK,
  2167. LUM_V_DEBLOCK | LUM_H_DEBLOCK,
  2168. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2169. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2170. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING,
  2171. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING | CHROM_DERING
  2172. #endif
  2173. };
  2174. #ifdef HAVE_ODIVX_POSTPROCESS
  2175. int odivx_modes[1+GET_PP_QUALITY_MAX]= {
  2176. 0,
  2177. PP_DEBLOCK_Y_H,
  2178. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V,
  2179. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H,
  2180. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V,
  2181. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y,
  2182. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y|PP_DERING_C
  2183. };
  2184. if(use_old_pp) return odivx_modes[quality];
  2185. #endif
  2186. return modes[quality];
  2187. }
  2188. //} // extern "C"
  2189. /**
  2190. * Copies a block from src to dst and fixes the blacklevel
  2191. * numLines must be a multiple of 4
  2192. * levelFix == 0 -> dont touch the brighness & contrast
  2193. */
  2194. static inline void blockCopy(uint8_t dst[], int dstStride, uint8_t src[], int srcStride,
  2195. int numLines, int levelFix)
  2196. {
  2197. int i;
  2198. if(levelFix)
  2199. {
  2200. #ifdef HAVE_MMX
  2201. asm volatile(
  2202. "movl %4, %%eax \n\t"
  2203. "movl %%eax, temp0\n\t"
  2204. "pushl %0 \n\t"
  2205. "pushl %1 \n\t"
  2206. "leal (%2,%2), %%eax \n\t"
  2207. "leal (%3,%3), %%ebx \n\t"
  2208. "movq packedYOffset, %%mm2 \n\t"
  2209. "movq packedYScale, %%mm3 \n\t"
  2210. "pxor %%mm4, %%mm4 \n\t"
  2211. #define SCALED_CPY \
  2212. "movq (%0), %%mm0 \n\t"\
  2213. "movq (%0,%2), %%mm1 \n\t"\
  2214. "psubusb %%mm2, %%mm0 \n\t"\
  2215. "psubusb %%mm2, %%mm1 \n\t"\
  2216. "movq %%mm0, %%mm5 \n\t"\
  2217. "punpcklbw %%mm4, %%mm0 \n\t"\
  2218. "punpckhbw %%mm4, %%mm5 \n\t"\
  2219. "psllw $7, %%mm0 \n\t"\
  2220. "psllw $7, %%mm5 \n\t"\
  2221. "pmulhw %%mm3, %%mm0 \n\t"\
  2222. "pmulhw %%mm3, %%mm5 \n\t"\
  2223. "packuswb %%mm5, %%mm0 \n\t"\
  2224. "movq %%mm0, (%1) \n\t"\
  2225. "movq %%mm1, %%mm5 \n\t"\
  2226. "punpcklbw %%mm4, %%mm1 \n\t"\
  2227. "punpckhbw %%mm4, %%mm5 \n\t"\
  2228. "psllw $7, %%mm1 \n\t"\
  2229. "psllw $7, %%mm5 \n\t"\
  2230. "pmulhw %%mm3, %%mm1 \n\t"\
  2231. "pmulhw %%mm3, %%mm5 \n\t"\
  2232. "packuswb %%mm5, %%mm1 \n\t"\
  2233. "movq %%mm1, (%1, %3) \n\t"\
  2234. "1: \n\t"
  2235. SCALED_CPY
  2236. "addl %%eax, %0 \n\t"
  2237. "addl %%ebx, %1 \n\t"
  2238. SCALED_CPY
  2239. "addl %%eax, %0 \n\t"
  2240. "addl %%ebx, %1 \n\t"
  2241. "decl temp0 \n\t"
  2242. "jnz 1b \n\t"
  2243. "popl %1 \n\t"
  2244. "popl %0 \n\t"
  2245. : : "r" (src),
  2246. "r" (dst),
  2247. "r" (srcStride),
  2248. "r" (dstStride),
  2249. "m" (numLines>>2)
  2250. : "%eax", "%ebx"
  2251. );
  2252. #else
  2253. for(i=0; i<numLines; i++)
  2254. memcpy( &(dst[dstStride*i]),
  2255. &(src[srcStride*i]), BLOCK_SIZE);
  2256. #endif
  2257. }
  2258. else
  2259. {
  2260. #ifdef HAVE_MMX
  2261. asm volatile(
  2262. "movl %4, %%eax \n\t"
  2263. "movl %%eax, temp0\n\t"
  2264. "pushl %0 \n\t"
  2265. "pushl %1 \n\t"
  2266. "leal (%2,%2), %%eax \n\t"
  2267. "leal (%3,%3), %%ebx \n\t"
  2268. "movq packedYOffset, %%mm2 \n\t"
  2269. "movq packedYScale, %%mm3 \n\t"
  2270. #define SIMPLE_CPY \
  2271. "movq (%0), %%mm0 \n\t"\
  2272. "movq (%0,%2), %%mm1 \n\t"\
  2273. "movq %%mm0, (%1) \n\t"\
  2274. "movq %%mm1, (%1, %3) \n\t"\
  2275. "1: \n\t"
  2276. SIMPLE_CPY
  2277. "addl %%eax, %0 \n\t"
  2278. "addl %%ebx, %1 \n\t"
  2279. SIMPLE_CPY
  2280. "addl %%eax, %0 \n\t"
  2281. "addl %%ebx, %1 \n\t"
  2282. "decl temp0 \n\t"
  2283. "jnz 1b \n\t"
  2284. "popl %1 \n\t"
  2285. "popl %0 \n\t"
  2286. : : "r" (src),
  2287. "r" (dst),
  2288. "r" (srcStride),
  2289. "r" (dstStride),
  2290. "m" (numLines>>2)
  2291. : "%eax", "%ebx"
  2292. );
  2293. #else
  2294. for(i=0; i<numLines; i++)
  2295. memcpy( &(dst[dstStride*i]),
  2296. &(src[srcStride*i]), BLOCK_SIZE);
  2297. #endif
  2298. }
  2299. }
/**
 * Filters array of bytes (Y or U or V values)
 * src/dst: one plane of width x height bytes with the given strides
 * QPs: per-macroblock quantizers, QPStride entries per MB row; luma blocks use
 *      the QP of their enclosing 16x16 MB (>>4), chroma maps 1:1 per 8x8 (>>3)
 * isColor: 0 for the Y plane (enables the histogram based level fix), 1 for U/V
 * mode: bitmask of filter flags (V_DEBLOCK, H_DEBLOCK, LEVEL_FIX, deint, ...)
 * Note: processes the image in 8-line stripes; the vertical filters work on a
 * window starting 3 lines above the block, the horizontal filter and dering
 * run one block behind the copy so their input is already filtered vertically.
 */
static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
	QP_STORE_T QPs[], int QPStride, int isColor, int mode)
{
	int x,y;
	/* we need 64bit here otherwise we'll be going to have a problem
	   after watching a black picture for 5 hours */
	static uint64_t *yHistogram= NULL;
	int black=0, white=255; // blackest black and whitest white in the picture

#ifdef TIMING
	long long T0, T1, memcpyTime=0, vertTime=0, horizTime=0, sumTime, diffTime=0;
	sumTime= rdtsc();
#endif

	// lazily allocated once, intentionally never freed (process-lifetime cache)
	if(!yHistogram)
	{
		int i;
		yHistogram= (uint64_t*)malloc(8*256);
		for(i=0; i<256; i++) yHistogram[i]= width*height/64*15/256;
	}

	if(!isColor)
	{
		// derive packedYOffset/packedYScale (replicated 8x8bit / 4x16bit in
		// 64bit globals) from the luma histogram of the previous frames so
		// blockCopy() can stretch the levels to [minAllowedY..maxAllowedY]
		uint64_t sum= 0;
		int i;
		static int framenum= -1;
		uint64_t maxClipped;
		uint64_t clipped;
		double scale;

		framenum++;
		if(framenum == 1) yHistogram[0]= width*height/64*15/256;

		for(i=0; i<256; i++)
		{
			sum+= yHistogram[i];
//			printf("%d ", yHistogram[i]);
		}
//		printf("\n\n");

		/* we allways get a completly black picture first */
		maxClipped= (uint64_t)(sum * maxClippedThreshold);

		// find the darkest/brightest levels with at most maxClipped samples
		// clipped beyond them
		clipped= sum;
		for(black=255; black>0; black--)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[black];
		}

		clipped= sum;
		for(white=0; white<256; white++)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[white];
		}

		// we cant handle negative correctures
		packedYOffset= MAX(black - minAllowedY, 0);
		packedYOffset|= packedYOffset<<32;
		packedYOffset|= packedYOffset<<16;
		packedYOffset|= packedYOffset<<8;

		scale= (double)(maxAllowedY - minAllowedY) / (double)(white-black);

		// 8.9 fixed point scale, replicated into all 4 words
		packedYScale= (uint16_t)(scale*512.0 + 0.5);
		packedYScale|= packedYScale<<32;
		packedYScale|= packedYScale<<16;
	}
	else
	{
		packedYScale= 0x0100010001000100LL;	// identity scale for chroma
		packedYOffset= 0;
	}

	// copy the first 8 lines up front; the main loop copies with a 3-line lag
	for(x=0; x<width; x+=BLOCK_SIZE)
		blockCopy(dst + x, dstStride, src + x, srcStride, 8, mode & LEVEL_FIX);

	for(y=0; y<height-7; y+=BLOCK_SIZE)
	{
		//1% speedup if these are here instead of the inner loop
		uint8_t *srcBlock= &(src[y*srcStride]);
		uint8_t *dstBlock= &(dst[y*dstStride]);

		uint8_t *vertSrcBlock= &(srcBlock[srcStride*3]); // Blocks are 10x8 -> *3 to start
		uint8_t *vertBlock= &(dstBlock[dstStride*3]);

		// finish 1 block before the next otherwise we'll might have a problem
		// with the L1 Cache of the P4 ... or only a few blocks at a time or soemthing
		for(x=0; x<width; x+=BLOCK_SIZE)
		{
			const int stride= dstStride;
			int QP= isColor ?
				QPs[(y>>3)*QPStride + (x>>3)]:
				QPs[(y>>4)*QPStride + (x>>4)];
			// compensate QP for the level stretch applied by blockCopy
			if(!isColor && (mode & LEVEL_FIX)) QP= (QP* (packedYScale &0xFFFF))>>8;
#ifdef HAVE_MMX
			// broadcast QP into all 8 bytes of the pQPb global for the filters
			asm volatile(
				"movd %0, %%mm7 \n\t"
				"packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP
				"packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP
				"packuswb %%mm7, %%mm7 \n\t" // QP,..., QP
				"movq %%mm7, pQPb \n\t"
				: : "r" (QP)
			);
#endif

			if(y + 12 < height)
			{
				// normal case: 4 more lines below the window are available
#ifdef MORE_TIMING
				T0= rdtsc();
#endif

#ifdef HAVE_MMX2
				prefetchnta(vertSrcBlock + (((x>>3)&3) + 2)*srcStride + 32);
				prefetchnta(vertSrcBlock + (((x>>3)&3) + 6)*srcStride + 32);
				prefetcht0(vertBlock + (((x>>3)&3) + 2)*dstStride + 32);
				prefetcht0(vertBlock + (((x>>3)&3) + 6)*dstStride + 32);
#elif defined(HAVE_3DNOW)
//FIXME check if this is faster on an 3dnow chip or if its faster without the prefetch or ...
/*				prefetch(vertSrcBlock + (((x>>3)&3) + 2)*srcStride + 32);
				prefetch(vertSrcBlock + (((x>>3)&3) + 6)*srcStride + 32);
				prefetchw(vertBlock + (((x>>3)&3) + 2)*dstStride + 32);
				prefetchw(vertBlock + (((x>>3)&3) + 6)*dstStride + 32);
*/
#endif
				// sample one pixel per block into the luma histogram
				if(!isColor) yHistogram[ srcBlock[0] ]++;

				blockCopy(vertBlock + dstStride*2, dstStride,
					vertSrcBlock + srcStride*2, srcStride, 8, mode & LEVEL_FIX);

				if(mode & LINEAR_IPOL_DEINT_FILTER)
					deInterlaceInterpolateLinear(dstBlock, dstStride);
				else if(mode & LINEAR_BLEND_DEINT_FILTER)
					deInterlaceBlendLinear(dstBlock, dstStride);
				else if(mode & MEDIAN_DEINT_FILTER)
					deInterlaceMedian(dstBlock, dstStride);
/*				else if(mode & CUBIC_IPOL_DEINT_FILTER)
					deInterlaceInterpolateCubic(dstBlock, dstStride);
				else if(mode & CUBIC_BLEND_DEINT_FILTER)
					deInterlaceBlendCubic(dstBlock, dstStride);
*/

#ifdef MORE_TIMING
				T1= rdtsc();
				memcpyTime+= T1-T0;
				T0=T1;
#endif
				if(mode & V_DEBLOCK)
				{
					if(mode & V_RK1_FILTER)
						vertRK1Filter(vertBlock, stride, QP);
					else if(mode & V_X1_FILTER)
						vertX1Filter(vertBlock, stride, QP);
					else
					{
						// default: low-pass flat (DC) blocks within the QP
						// range, default-filter everything else
						if( isVertDC(vertBlock, stride))
						{
							if(isVertMinMaxOk(vertBlock, stride, QP))
								doVertLowPass(vertBlock, stride, QP);
						}
						else
							doVertDefFilter(vertBlock, stride, QP);
					}
				}
#ifdef MORE_TIMING
				T1= rdtsc();
				vertTime+= T1-T0;
				T0=T1;
#endif
			}
			else
			{
				// last stripe: use the *LastRow filter variants which do not
				// read below the block
				blockCopy(vertBlock + dstStride*1, dstStride,
					vertSrcBlock + srcStride*1, srcStride, 4, mode & LEVEL_FIX);

				if(mode & LINEAR_IPOL_DEINT_FILTER)
					deInterlaceInterpolateLinearLastRow(dstBlock, dstStride);
				else if(mode & LINEAR_BLEND_DEINT_FILTER)
					deInterlaceBlendLinearLastRow(dstBlock, dstStride);
				else if(mode & MEDIAN_DEINT_FILTER)
					deInterlaceMedianLastRow(dstBlock, dstStride);
/*				else if(mode & CUBIC_IPOL_DEINT_FILTER)
					deInterlaceInterpolateCubicLastRow(dstBlock, dstStride);
				else if(mode & CUBIC_BLEND_DEINT_FILTER)
					deInterlaceBlendCubicLastRow(dstBlock, dstStride);
*/
			}

			// horizontal deblock + dering lag one block behind the copy so
			// they see vertically filtered data on both sides of the edge
			if(x - 8 >= 0 && x<width)
			{
#ifdef MORE_TIMING
				T0= rdtsc();
#endif
				if(mode & H_DEBLOCK)
				{
					if(mode & H_X1_FILTER)
						horizX1Filter(dstBlock-4, stride, QP);
					else
					{
						if( isHorizDCAndCopy2Temp(dstBlock-4, stride))
						{
							if(isHorizMinMaxOk(tempBlock, TEMP_STRIDE, QP))
								doHorizLowPassAndCopyBack(dstBlock-4, stride, QP);
						}
						else
							doHorizDefFilterAndCopyBack(dstBlock-4, stride, QP);
					}
				}
#ifdef MORE_TIMING
				T1= rdtsc();
				horizTime+= T1-T0;
				T0=T1;
#endif
				dering(dstBlock - 9 - stride, stride, QP);
			}
			else if(y!=0)
				// last block of the previous row (wrap-around case)
				dering(dstBlock - stride*9 + width-9, stride, QP);
			//FIXME dering filter will not be applied to last block (bottom right)

			dstBlock+=8;
			srcBlock+=8;
			vertBlock+=8;
			vertSrcBlock+=8;
		}
	}
	// leave the FPU/MMX state clean for the caller
#ifdef HAVE_3DNOW
	asm volatile("femms");
#elif defined (HAVE_MMX)
	asm volatile("emms");
#endif

#ifdef TIMING
	// FIXME diff is mostly the time spent for rdtsc (should subtract that but ...)
	sumTime= rdtsc() - sumTime;
	if(!isColor)
		printf("cpy:%4dk, vert:%4dk, horiz:%4dk, sum:%4dk, diff:%4dk, color: %d/%d \r",
			(int)(memcpyTime/1000), (int)(vertTime/1000), (int)(horizTime/1000),
			(int)(sumTime/1000), (int)((sumTime-memcpyTime-vertTime-horizTime)/1000)
			, black, white);
#endif
}