You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

4306 lines
125KB

  1. /*
  2. Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. /*
  16. C MMX MMX2 3DNow
  17. isVertDC Ec Ec
  18. isVertMinMaxOk Ec Ec
  19. doVertLowPass E e e
  20. doVertDefFilter Ec Ec e e
  21. isHorizDC Ec Ec
  22. isHorizMinMaxOk a E
  23. doHorizLowPass E e e
  24. doHorizDefFilter Ec Ec e e
  25. deRing E e e*
  26. Vertical RKAlgo1 E a a
  27. Horizontal RKAlgo1 a a
  28. Vertical X1# a E E
  29. Horizontal X1# a E E
  30. LinIpolDeinterlace e E E*
  31. CubicIpolDeinterlace a e e*
  32. LinBlendDeinterlace e E E*
  33. MedianDeinterlace# Ec Ec
  34. TempDeNoiser# E e e
* I don't have a 3DNow CPU -> it's untested, but no one said it doesn't work, so it seems to work
# more or less self-invented filters, so the exactness isn't too meaningful
E = Exact implementation
e = almost exact implementation (slightly different rounding, ...)
a = alternative / approximate impl
c = checked against the other implementations (-vo md5)
  41. */
  42. /*
  43. TODO:
verify that everything works as it should (how?)
  45. reduce the time wasted on the mem transfer
  46. implement everything in C at least (done at the moment but ...)
  47. unroll stuff if instructions depend too much on the prior one
  48. we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
  49. move YScale thing to the end instead of fixing QP
  50. write a faster and higher quality deblocking filter :)
  51. make the mainloop more flexible (variable number of blocks at once
  52. (the if/else stuff per block is slowing things down)
  53. compare the quality & speed of all filters
  54. split this huge file
  55. border remover
  56. optimize c versions
  57. try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
  58. smart blur
  59. ...
  60. */
  61. //Changelog: use the CVS log
  62. #include "../config.h"
  63. #include <inttypes.h>
  64. #include <stdio.h>
  65. #include <stdlib.h>
  66. #include <string.h>
  67. #ifdef HAVE_MALLOC_H
  68. #include <malloc.h>
  69. #endif
  70. //#undef HAVE_MMX2
  71. //#define HAVE_3DNOW
  72. //#undef HAVE_MMX
  73. //#define DEBUG_BRIGHTNESS
  74. #include "postprocess.h"
/* Scalar helper macros. NOTE: classic function-like macros — each argument may
 * be evaluated more than once, so never pass expressions with side effects.
 * SIGN(0) deliberately yields -1 (there is no zero case). */
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define SIGN(a) ((a) > 0 ? 1 : -1)

/* PAVGB(a,b): emit asm for b = rounded per-byte average of a and b.
 * MMX2 has a native pavgb; 3DNow's pavgusb rounds slightly differently. */
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

/* PMINUB: per-byte unsigned minimum. The plain-MMX fallback emulates pminub
 * with a saturating subtract into scratch register t; note the parameter
 * order of the fallback variant is intentionally swapped. */
#ifdef HAVE_MMX2
#define PMINUB(a,b,t) "pminub " #a ", " #b " \n\t"
#elif defined (HAVE_MMX)
#define PMINUB(b,a,t) \
	"movq " #a ", " #t " \n\t"\
	"psubusb " #b ", " #t " \n\t"\
	"psubb " #t ", " #a " \n\t"
#endif

/* PMAXUB: per-byte unsigned maximum (MMX fallback: b = sat(b - a) + a). */
#ifdef HAVE_MMX2
#define PMAXUB(a,b) "pmaxub " #a ", " #b " \n\t"
#elif defined (HAVE_MMX)
#define PMAXUB(a,b) \
	"psubusb " #a ", " #b " \n\t"\
	"paddb " #a ", " #b " \n\t"
#endif

/* Buffer sizes for mode-string parsing / option tables. */
#define GET_MODE_BUFFER_SIZE 500
#define OPTIONS_ARRAY_SIZE 10
#ifdef HAVE_MMX
/* 64-bit packed constants used as memory operands by the inline MMX asm below
 * (referenced by symbol name, e.g. "movq b7E, %%mm7"), so they must keep these
 * exact names and stay 8-byte aligned.
 * Naming scheme:
 *   wXXXX      - four packed 16-bit words of value 0xXXXX
 *   bmDDDDDDDD - per-byte mask; each binary digit selects one byte (1 -> 0xFF)
 *   bXX        - eight copies of the byte 0xXX
 * tempN / pQPb / pQPb2 are scratch space and per-call parameters written from
 * C before entering the asm (pQPb = QP broadcast to all 8 bytes). */
static volatile uint64_t __attribute__((aligned(8))) packedYOffset=	0x0000000000000000LL;
static volatile uint64_t __attribute__((aligned(8))) packedYScale=	0x0100010001000100LL;
static uint64_t __attribute__((aligned(8))) w05=		0x0005000500050005LL;
static uint64_t __attribute__((aligned(8))) w20=		0x0020002000200020LL;
static uint64_t __attribute__((aligned(8))) w1400=		0x1400140014001400LL;
static uint64_t __attribute__((aligned(8))) bm00000001=	0x00000000000000FFLL;
static uint64_t __attribute__((aligned(8))) bm00010000=	0x000000FF00000000LL;
static uint64_t __attribute__((aligned(8))) bm00001000=	0x00000000FF000000LL;
static uint64_t __attribute__((aligned(8))) bm10000000=	0xFF00000000000000LL;
static uint64_t __attribute__((aligned(8))) bm10000001=	0xFF000000000000FFLL;
static uint64_t __attribute__((aligned(8))) bm11000011=	0xFFFF00000000FFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000011=	0x000000000000FFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111110=	0xFFFFFFFFFFFFFF00LL;
static uint64_t __attribute__((aligned(8))) bm11000000=	0xFFFF000000000000LL;
static uint64_t __attribute__((aligned(8))) bm00011000=	0x000000FFFF000000LL;
static uint64_t __attribute__((aligned(8))) bm00110011=	0x0000FFFF0000FFFFLL;
static uint64_t __attribute__((aligned(8))) bm11001100=	0xFFFF0000FFFF0000LL;
static uint64_t __attribute__((aligned(8))) b00= 		0x0000000000000000LL;
static uint64_t __attribute__((aligned(8))) b01= 		0x0101010101010101LL;
static uint64_t __attribute__((aligned(8))) b02= 		0x0202020202020202LL;
static uint64_t __attribute__((aligned(8))) b0F= 		0x0F0F0F0F0F0F0F0FLL;
static uint64_t __attribute__((aligned(8))) b04= 		0x0404040404040404LL;
static uint64_t __attribute__((aligned(8))) b08= 		0x0808080808080808LL;
static uint64_t __attribute__((aligned(8))) bFF= 		0xFFFFFFFFFFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) b20= 		0x2020202020202020LL;
static uint64_t __attribute__((aligned(8))) b80= 		0x8080808080808080LL;
static uint64_t __attribute__((aligned(8))) b7E= 		0x7E7E7E7E7E7E7E7ELL;
static uint64_t __attribute__((aligned(8))) b7C= 		0x7C7C7C7C7C7C7C7CLL;
static uint64_t __attribute__((aligned(8))) b3F= 		0x3F3F3F3F3F3F3F3FLL;
static uint64_t __attribute__((aligned(8))) temp0=0;
static uint64_t __attribute__((aligned(8))) temp1=0;
static uint64_t __attribute__((aligned(8))) temp2=0;
static uint64_t __attribute__((aligned(8))) temp3=0;
static uint64_t __attribute__((aligned(8))) temp4=0;
static uint64_t __attribute__((aligned(8))) temp5=0;
static uint64_t __attribute__((aligned(8))) pQPb=0;
static uint64_t __attribute__((aligned(8))) pQPb2=0;
static uint8_t __attribute__((aligned(8))) tempBlocks[8*16*2]; //used for the horizontal code
static uint32_t __attribute__((aligned(4))) maxTmpNoise[4];
#else
/* Non-MMX build: only the names also used from C are needed, no alignment. */
static uint64_t packedYOffset=	0x0000000000000000LL;
static uint64_t packedYScale=	0x0100010001000100LL;
static uint8_t tempBlocks[8*16*2]; //used for the horizontal code
#endif
/* A block counts as flat ("DC") when more than this many of its 56 adjacent
 * pixel pairs (8 columns x 7 row pairs) are nearly equal; see isVertDC(). */
int hFlatnessThreshold= 56 - 16;
int vFlatnessThreshold= 56 - 16;
//amount of "black" you are willing to lose to get a brightness-corrected picture
double maxClippedThreshold= 0.01;
/* Luma values outside [minAllowedY, maxAllowedY] are treated as clipped by
 * the brightness-correction (autolevels) code. */
int maxAllowedY=234;
int minAllowedY=16;
/* All available postprocessing filters, terminated by an all-NULL/zero entry.
 * Each entry: short option name, long option name, then three numeric fields
 * and the filter's mode-flag constant.
 * NOTE(review): exact field semantics come from struct PPFilter, declared
 * elsewhere (postprocess.h) — verify against the struct declaration. */
static struct PPFilter filters[]=
{
	{"hb", "hdeblock", 1, 1, 3, H_DEBLOCK},
	{"vb", "vdeblock", 1, 2, 4, V_DEBLOCK},
	{"vr", "rkvdeblock", 1, 2, 4, H_RK1_FILTER},
	{"h1", "x1hdeblock", 1, 1, 3, H_X1_FILTER},
	{"v1", "x1vdeblock", 1, 2, 4, V_X1_FILTER},
	{"dr", "dering", 1, 5, 6, DERING},
	{"al", "autolevels", 0, 1, 2, LEVEL_FIX},
	{"lb", "linblenddeint", 0, 1, 6, LINEAR_BLEND_DEINT_FILTER},
	{"li", "linipoldeint", 0, 1, 6, LINEAR_IPOL_DEINT_FILTER},
	{"ci", "cubicipoldeint", 0, 1, 6, CUBIC_IPOL_DEINT_FILTER},
	{"md", "mediandeint", 0, 1, 6, MEDIAN_DEINT_FILTER},
	{"tn", "tmpnoise", 1, 7, 8, TEMP_NOISE_FILTER},
	{NULL, NULL,0,0,0,0} //End Marker
};
/* Mode-string aliases: {alias, expansion} pairs scanned pairwise,
 * NULL-terminated. "de"/"fa" are shorthands for "default"/"fast". */
static char *replaceTable[]=
{
	"default", "hdeblock:a,vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
	"de", "hdeblock:a,vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
	"fast", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
	"fa", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
	NULL //End Marker
};
#ifdef HAVE_MMX
/* The packed constants above are referenced only from inline-asm strings,
 * which the compiler cannot see, so it would flag them as unused. Summing
 * them all in a condition keeps them "used"; the branch is practically never
 * taken, and harmless if it is (b00=0 stores the value b00 already has). */
static inline void unusedVariableWarningFixer()
{
	if(
	 packedYOffset + packedYScale + w05 + w20 + w1400 + bm00000001 + bm00010000
	 + bm00001000 + bm10000000 + bm10000001 + bm11000011 + bm00000011 + bm11111110
	 + bm11000000 + bm00011000 + bm00110011 + bm11001100 + b00 + b01 + b02 + b0F
	 + bFF + b20 + b04+ b08 + pQPb2 + b80 + b7E + b7C + b3F + temp0 + temp1 + temp2 + temp3 + temp4
	 + temp5 + pQPb== 0) b00=0;
}
#endif
#ifdef TIMING
/* Read the x86 time-stamp counter (cycles since reset) for profiling.
 * The "=A" constraint returns the edx:eax pair as one 64-bit value (ia32). */
static inline long long rdtsc()
{
	long long l;
	asm volatile(	"rdtsc\n\t"
		: "=A" (l)
	);
//	printf("%d\n", int(l/1000));
	return l;
}
#endif
#ifdef HAVE_MMX2
/* Thin wrappers around the SSE prefetch hints: fetch the cache line at p.
 * nta = non-temporal (bypass/minimize cache pollution), t0..t2 = temporal
 * hints for progressively more distant cache levels. */
static inline void prefetchnta(void *p)
{
	asm volatile(	"prefetchnta (%0)\n\t"
		: : "r" (p)
	);
}
static inline void prefetcht0(void *p)
{
	asm volatile(	"prefetcht0 (%0)\n\t"
		: : "r" (p)
	);
}
static inline void prefetcht1(void *p)
{
	asm volatile(	"prefetcht1 (%0)\n\t"
		: : "r" (p)
	);
}
static inline void prefetcht2(void *p)
{
	asm volatile(	"prefetcht2 (%0)\n\t"
		: : "r" (p)
	);
}
#endif
//FIXME? |255-0| = 1 (shouldn't be a problem ...)
/**
 * Check if the middle 8x8 block in the given 8x16 block is flat ("DC"):
 * count the vertically adjacent pixel pairs whose difference is within +/-1
 * and compare that count (max 8 columns x 7 row pairs = 56) against
 * vFlatnessThreshold. Returns 1 if flat, 0 otherwise.
 */
static inline int isVertDC(uint8_t src[], int stride){
	int numEq= 0;
#ifndef HAVE_MMX
	int y;
#endif
	src+= stride*4; // src points to begin of the 8x8 Block
#ifdef HAVE_MMX
	asm volatile(
		"leal (%1, %2), %%eax				\n\t"
		"leal (%%eax, %2, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%1	eax	eax+%2	eax+2%2	%1+4%2	ebx	ebx+%2	ebx+2%2	%1+8%2	ebx+4%2
		/* Per row pair: (a-b) + 0x7E compared signed-greater than 0x7C is
		 * true exactly for a-b in {-1,0,1}; pcmpgtb then writes 0xFF (-1)
		 * per matching byte, which mm0 accumulates. */
		"movq b7E, %%mm7				\n\t" // mm7 = 0x7E
		"movq b7C, %%mm6				\n\t" // mm6 = 0x7C
		"movq (%1), %%mm0				\n\t"
		"movq (%%eax), %%mm1				\n\t"
		"psubb %%mm1, %%mm0				\n\t" // mm0 = difference
		"paddb %%mm7, %%mm0				\n\t"
		"pcmpgtb %%mm6, %%mm0				\n\t"
		"movq (%%eax,%2), %%mm2				\n\t"
		"psubb %%mm2, %%mm1				\n\t"
		"paddb %%mm7, %%mm1				\n\t"
		"pcmpgtb %%mm6, %%mm1				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"movq (%%eax, %2, 2), %%mm1			\n\t"
		"psubb %%mm1, %%mm2				\n\t"
		"paddb %%mm7, %%mm2				\n\t"
		"pcmpgtb %%mm6, %%mm2				\n\t"
		"paddb %%mm2, %%mm0				\n\t"
		"movq (%1, %2, 4), %%mm2			\n\t"
		"psubb %%mm2, %%mm1				\n\t"
		"paddb %%mm7, %%mm1				\n\t"
		"pcmpgtb %%mm6, %%mm1				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"movq (%%ebx), %%mm1				\n\t"
		"psubb %%mm1, %%mm2				\n\t"
		"paddb %%mm7, %%mm2				\n\t"
		"pcmpgtb %%mm6, %%mm2				\n\t"
		"paddb %%mm2, %%mm0				\n\t"
		"movq (%%ebx, %2), %%mm2			\n\t"
		"psubb %%mm2, %%mm1				\n\t"
		"paddb %%mm7, %%mm1				\n\t"
		"pcmpgtb %%mm6, %%mm1				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"movq (%%ebx, %2, 2), %%mm1			\n\t"
		"psubb %%mm1, %%mm2				\n\t"
		"paddb %%mm7, %%mm2				\n\t"
		"pcmpgtb %%mm6, %%mm2				\n\t"
		"paddb %%mm2, %%mm0				\n\t"
		"						\n\t"
		/* horizontal sum of the 8 per-byte counters into the low byte */
		"movq %%mm0, %%mm1				\n\t"
		"psrlw $8, %%mm0				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
#ifdef HAVE_MMX2
		"pshufw $0xF9, %%mm0, %%mm1			\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"pshufw $0xFE, %%mm0, %%mm1			\n\t"
#else
		"movq %%mm0, %%mm1				\n\t"
		"psrlq $16, %%mm0				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"movq %%mm0, %%mm1				\n\t"
		"psrlq $32, %%mm0				\n\t"
#endif
		"paddb %%mm1, %%mm0				\n\t"
		"movd %%mm0, %0					\n\t"
		: "=r" (numEq)
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
		);
	numEq= (256 - numEq) &0xFF;	// matches were counted as -1 each; negate mod 256
#else
	for(y=0; y<BLOCK_SIZE-1; y++)
	{
		/* ((diff + 1) & 0xFFFF) < 3  <=>  diff in {-1, 0, 1} */
		if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
		src+= stride;
	}
#endif
/*	if(abs(numEq - asmEq) > 0)
	{
		printf("\nasm:%d c:%d\n", asmEq, numEq);
		for(int y=0; y<8; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", temp[x + y*stride]);
			}
			printf("\n");
		}
	}
*/
//	for(int i=0; i<numEq/8; i++) src[i]=255;
	return (numEq > vFlatnessThreshold) ? 1 : 0;
}
/**
 * Check that the 8x8 block may be deblocked without destroying real edges:
 * the first and last lines of the block must differ by no more than 2*QP in
 * every column. Returns nonzero (all-ones mask in the MMX path, 1 in the C
 * path) when the block is OK to filter.
 */
static inline int isVertMinMaxOk(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
	int isOk;
	src+= stride*3;
	asm volatile(
//		"int $3 \n\t"
		"movq (%1, %2), %%mm0				\n\t" // first line of the block
		"movq (%1, %2, 8), %%mm1			\n\t" // last line of the block
		"movq %%mm0, %%mm2				\n\t"
		"psubusb %%mm1, %%mm0				\n\t"
		"psubusb %%mm2, %%mm1				\n\t"
		"por %%mm1, %%mm0				\n\t" // ABS Diff
		"movq pQPb, %%mm7				\n\t" // QP,..., QP
		"paddusb %%mm7, %%mm7				\n\t" // 2QP ... 2QP
		"psubusb %%mm7, %%mm0				\n\t" // Diff <= 2QP -> 0
		/* collapse the 8-byte result to a single all-pass/any-fail flag */
		"pcmpeqd b00, %%mm0				\n\t"
		"psrlq $16, %%mm0				\n\t"
		"pcmpeqd bFF, %%mm0				\n\t"
//		"movd %%mm0, (%1, %2, 4)\n\t"
		"movd %%mm0, %0					\n\t"
		: "=r" (isOk)
		: "r" (src), "r" (stride)
		);
	return isOk;
#else
	int isOk2= 1;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=0;
	}
/*	if(isOk && !isOk2 || !isOk && isOk2)
	{
		printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
		for(int y=0; y<9; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", src[x + y*stride]);
			}
			printf("\n");
		}
	} */
	return isOk2;
#endif
}
/**
 * Do a vertical low pass filter on the 8x16 block (only write to the 8x8 block in the middle)
 * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16.
 * The outermost taps are clamped: if the line just outside the block differs
 * from its neighbour by more than QP, the neighbour is used instead, so the
 * filter does not smear across real edges at the block boundary.
 */
static inline void doVertLowPass(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	src+= stride*3;
	asm volatile(	//"movv %0 %1 %2\n\t"
		"movq pQPb, %%mm0			\n\t"  // QP,..., QP
		/* mm6 = first line to filter: l0 if |l0-l1| <= QP else l1 */
		"movq (%0), %%mm6			\n\t"
		"movq (%0, %1), %%mm5			\n\t"
		"movq %%mm5, %%mm1			\n\t"
		"movq %%mm6, %%mm2			\n\t"
		"psubusb %%mm6, %%mm5			\n\t"
		"psubusb %%mm1, %%mm2			\n\t"
		"por %%mm5, %%mm2			\n\t" // ABS Diff of lines
		"psubusb %%mm0, %%mm2			\n\t" // diff <= QP -> 0
		"pcmpeqb b00, %%mm2			\n\t" // diff <= QP -> FF
		"pand %%mm2, %%mm6			\n\t"
		"pandn %%mm1, %%mm2			\n\t"
		"por %%mm2, %%mm6			\n\t"// First Line to Filter
		/* mm7 = last line to filter: l9 if |l8-l9| <= QP else l8 */
		"movq (%0, %1, 8), %%mm5		\n\t"
		"leal (%0, %1, 4), %%eax		\n\t"
		"leal (%0, %1, 8), %%ebx		\n\t"
		"subl %1, %%ebx				\n\t"
		"addl %1, %0				\n\t" // %0 points to line 1 not 0
		"movq (%0, %1, 8), %%mm7		\n\t"
		"movq %%mm5, %%mm1			\n\t"
		"movq %%mm7, %%mm2			\n\t"
		"psubusb %%mm7, %%mm5			\n\t"
		"psubusb %%mm1, %%mm2			\n\t"
		"por %%mm5, %%mm2			\n\t" // ABS Diff of lines
		"psubusb %%mm0, %%mm2			\n\t" // diff <= QP -> 0
		"pcmpeqb b00, %%mm2			\n\t" // diff <= QP -> FF
		"pand %%mm2, %%mm7			\n\t"
		"pandn %%mm1, %%mm2			\n\t"
		"por %%mm2, %%mm7			\n\t" // Last Line to Filter
		/* The 9-tap filter is assembled from rounded pairwise averages
		 * (PAVGB); the digit strings in the comments track which taps each
		 * register currently holds and the implicit /2^n scale. */
//	1	2	3	4	5	6	7	8
//	%0	%0+%1	%0+2%1	eax	%0+4%1	eax+2%1	ebx	eax+4%1
// 6 4 2 2 1 1
// 6 4 4 2
// 6 8 2
		"movq (%0, %1), %%mm0			\n\t" //  1
		"movq %%mm0, %%mm1			\n\t" //  1
		PAVGB(%%mm6, %%mm0)			      //1 1	/2
		PAVGB(%%mm6, %%mm0)			      //3 1	/4
		"movq (%0, %1, 4), %%mm2		\n\t" //     1
		"movq %%mm2, %%mm5			\n\t" //     1
		PAVGB((%%eax), %%mm2)			      //    11	/2
		PAVGB((%0, %1, 2), %%mm2)		      //   211	/4
		"movq %%mm2, %%mm3			\n\t" //   211	/4
		"movq (%0), %%mm4			\n\t" // 1
		PAVGB(%%mm4, %%mm3)			      // 4 211	/8
		PAVGB(%%mm0, %%mm3)			      //642211	/16
		"movq %%mm3, (%0)			\n\t" // X
		// mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
		"movq %%mm1, %%mm0			\n\t" //  1
		PAVGB(%%mm6, %%mm0)			      //1 1	/2
		"movq %%mm4, %%mm3			\n\t" // 1
		PAVGB((%0,%1,2), %%mm3)			      // 1 1	/2
		PAVGB((%%eax,%1,2), %%mm5)		      //     11	/2
		PAVGB((%%eax), %%mm5)			      //    211 /4
		PAVGB(%%mm5, %%mm3)			      // 2 2211 /8
		PAVGB(%%mm0, %%mm3)			      //4242211 /16
		"movq %%mm3, (%0,%1)			\n\t" //  X
		// mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
		PAVGB(%%mm4, %%mm6)			      //11	/2
		"movq (%%ebx), %%mm0			\n\t" //       1
		PAVGB((%%eax, %1, 2), %%mm0)		      //      11/2
		"movq %%mm0, %%mm3			\n\t" //      11/2
		PAVGB(%%mm1, %%mm0)			      //  2   11/4
		PAVGB(%%mm6, %%mm0)			      //222   11/8
		PAVGB(%%mm2, %%mm0)			      //22242211/16
		"movq (%0, %1, 2), %%mm2		\n\t" //   1
		"movq %%mm0, (%0, %1, 2)		\n\t" //   X
		// mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
		"movq (%%eax, %1, 4), %%mm0		\n\t" //        1
		PAVGB((%%ebx), %%mm0)			      //       11	/2
		PAVGB(%%mm0, %%mm6)			      //11     11	/4
		PAVGB(%%mm1, %%mm4)			      // 11		/2
		PAVGB(%%mm2, %%mm1)			      //  11		/2
		PAVGB(%%mm1, %%mm6)			      //1122   11	/8
		PAVGB(%%mm5, %%mm6)			      //112242211	/16
		"movq (%%eax), %%mm5			\n\t" //    1
		"movq %%mm6, (%%eax)			\n\t" //    X
		// mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
		"movq (%%eax, %1, 4), %%mm6		\n\t" //        1
		PAVGB(%%mm7, %%mm6)			      //        11	/2
		PAVGB(%%mm4, %%mm6)			      //11      11	/4
		PAVGB(%%mm3, %%mm6)			      //11    2211	/8
		PAVGB(%%mm5, %%mm2)			      //   11		/2
		"movq (%0, %1, 4), %%mm4		\n\t" //     1
		PAVGB(%%mm4, %%mm2)			      //   112		/4
		PAVGB(%%mm2, %%mm6)			      //112242211	/16
		"movq %%mm6, (%0, %1, 4)		\n\t" //     X
		// mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
		PAVGB(%%mm7, %%mm1)			      //  11     2	/4
		PAVGB(%%mm4, %%mm5)			      //    11		/2
		PAVGB(%%mm5, %%mm0)			      //    11 11	/4
		"movq (%%eax, %1, 2), %%mm6		\n\t" //      1
		PAVGB(%%mm6, %%mm1)			      //  11  4  2	/8
		PAVGB(%%mm0, %%mm1)			      //  11224222	/16
		"movq %%mm1, (%%eax, %1, 2)		\n\t" //      X
		// mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
		PAVGB((%%ebx), %%mm2)			      //   112 4	/8
		"movq (%%eax, %1, 4), %%mm0		\n\t" //        1
		PAVGB(%%mm0, %%mm6)			      //      1 1	/2
		PAVGB(%%mm7, %%mm6)			      //      1 12	/4
		PAVGB(%%mm2, %%mm6)			      //   1122424	/4
		"movq %%mm6, (%%ebx)			\n\t" //       X
		// mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
		PAVGB(%%mm7, %%mm5)			      //    11   2	/4
		PAVGB(%%mm7, %%mm5)			      //    11   6	/8
		PAVGB(%%mm3, %%mm0)			      //      112	/4
		PAVGB(%%mm0, %%mm5)			      //    112246	/16
		"movq %%mm5, (%%eax, %1, 4)		\n\t" //        X
		"subl %1, %0				\n\t"
		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	/* C reference: same 9-tap (1,1,2,2,4,2,2,1,1)/16 built from pairwise
	 * sums, with the outer taps clamped by QP ("first"/"last") as above. */
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
		const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];
		int sums[9];
		sums[0] = first + src[l1];
		sums[1] = src[l1] + src[l2];
		sums[2] = src[l2] + src[l3];
		sums[3] = src[l3] + src[l4];
		sums[4] = src[l4] + src[l5];
		sums[5] = src[l5] + src[l6];
		sums[6] = src[l6] + src[l7];
		sums[7] = src[l7] + src[l8];
		sums[8] = src[l8] + last;
		src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
		src[l2]= ((src[l2]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
		src[l3]= ((src[l3]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
		src[l4]= ((src[l4]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
		src[l5]= ((src[l5]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
		src[l6]= ((src[l6]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
		src[l7]= (((last + src[l7])<<2) + ((src[l8] + sums[5])<<1) + sums[3] + 8)>>4;
		src[l8]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
		src++;
	}
#endif
}
/**
 * Experimental implementation of the filter (Algorithm 1) described in a
 * paper from Ramkishor & Karandikar.
 * Values are correctly clipped (MMX2); values wrap around (C).
 * Conclusion: it's fast, but introduces ugly horizontal patterns if there is
 * a continuous gradient.
 * Example of the correction applied across the block edge:
 *	0 8 16 24
 *	x = 8
 *	x/2 = 4
 *	x/8 = 1
 *	1 12 12 23
 */
static inline void vertRK1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	src+= stride*3;
// FIXME rounding
	asm volatile(
		"pxor %%mm7, %%mm7				\n\t" // 0
		"movq b80, %%mm6				\n\t" // MIN_SIGNED_BYTE
		"leal (%0, %1), %%eax				\n\t"
		"leal (%%eax, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		/* threshold mm0 = QP*1.25 (QP + QP/4, with unsigned saturation) */
		"movq pQPb, %%mm0				\n\t" // QP,..., QP
		"movq %%mm0, %%mm1				\n\t" // QP,..., QP
		"paddusb b02, %%mm0				\n\t"
		"psrlw $2, %%mm0				\n\t"
		"pand b3F, %%mm0				\n\t" // QP/4,..., QP/4
		"paddusb %%mm1, %%mm0				\n\t" // QP*1.25 ...
		"movq (%0, %1, 4), %%mm2			\n\t" // line 4
		"movq (%%ebx), %%mm3				\n\t" // line 5
		"movq %%mm2, %%mm4				\n\t" // line 4
		"pcmpeqb %%mm5, %%mm5				\n\t" // -1
		"pxor %%mm2, %%mm5				\n\t" // -line 4 - 1
		PAVGB(%%mm3, %%mm5)
		"paddb %%mm6, %%mm5				\n\t" // (l5-l4)/2
		"psubusb %%mm3, %%mm4				\n\t"
		"psubusb %%mm2, %%mm3				\n\t"
		"por %%mm3, %%mm4				\n\t" // |l4 - l5|
		"psubusb %%mm0, %%mm4				\n\t"
		"pcmpeqb %%mm7, %%mm4				\n\t" // |l4-l5| <= QP*1.25 -> FF
		"pand %%mm4, %%mm5				\n\t" // d/2
		/* l4 += d/2, l5 -= d/2 where the edge passed the threshold test */
//		"paddb %%mm6, %%mm2				\n\t" // line 4 + 0x80
		"paddb %%mm5, %%mm2				\n\t"
//		"psubb %%mm6, %%mm2				\n\t"
		"movq %%mm2, (%0,%1, 4)				\n\t"
		"movq (%%ebx), %%mm2				\n\t"
//		"paddb %%mm6, %%mm2				\n\t" // line 5 + 0x80
		"psubb %%mm5, %%mm2				\n\t"
//		"psubb %%mm6, %%mm2				\n\t"
		"movq %%mm2, (%%ebx)				\n\t"
		/* l3 += d/8, l6 -= d/8, done in the signed domain (bias by 0x80) */
		"paddb %%mm6, %%mm5				\n\t"
		"psrlw $2, %%mm5				\n\t"
		"pand b3F, %%mm5				\n\t"
		"psubb b20, %%mm5				\n\t" // (l5-l4)/8
		"movq (%%eax, %1, 2), %%mm2			\n\t"
		"paddb %%mm6, %%mm2				\n\t" // line 3 + 0x80
		"paddsb %%mm5, %%mm2				\n\t"
		"psubb %%mm6, %%mm2				\n\t"
		"movq %%mm2, (%%eax, %1, 2)			\n\t"
		"movq (%%ebx, %1), %%mm2			\n\t"
		"paddb %%mm6, %%mm2				\n\t" // line 6 + 0x80
		"psubsb %%mm5, %%mm2				\n\t"
		"psubb %%mm6, %%mm2				\n\t"
		"movq %%mm2, (%%ebx, %1)			\n\t"
		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
//	const int l7= stride + l6;
//	const int l8= stride + l7;
//	const int l9= stride + l8;
	int x;
	const int QP15= QP + (QP>>2);	// QP*1.25, matching the MMX threshold
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		const int v = (src[x+l5] - src[x+l4]);
		if(ABS(v) < QP15)
		{
			src[x+l3] +=v>>3;
			src[x+l4] +=v>>1;
			src[x+l5] -=v>>1;
			src[x+l6] -=v>>3;
		}
	}
#endif
}
/**
 * Experimental Filter 1.
 * Will not damage linear gradients.
 * Flat blocks should look like they were passed through the
 * (1,1,2,2,4,2,2,1,1) 9-tap filter.
 * Can only smooth blocks at the expected locations (it can't smooth them if
 * they did move).
 * MMX2 version does correct clipping, C version doesn't.
 */
static inline void vertX1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	src+= stride*3;
	asm volatile(
		"pxor %%mm7, %%mm7				\n\t" // 0
//		"movq b80, %%mm6				\n\t" // MIN_SIGNED_BYTE
		"leal (%0, %1), %%eax				\n\t"
		"leal (%%eax, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		/* edge strength d = MAX(0, |l4-l5| - (|l2-l3| + |l5-l6|)/2) */
		"movq (%%eax, %1, 2), %%mm0			\n\t" // line 3
		"movq (%0, %1, 4), %%mm1			\n\t" // line 4
		"movq %%mm1, %%mm2				\n\t" // line 4
		"psubusb %%mm0, %%mm1				\n\t"
		"psubusb %%mm2, %%mm0				\n\t"
		"por %%mm1, %%mm0				\n\t" // |l2 - l3|
		"movq (%%ebx), %%mm3				\n\t" // line 5
		"movq (%%ebx, %1), %%mm4			\n\t" // line 6
		"movq %%mm3, %%mm5				\n\t" // line 5
		"psubusb %%mm4, %%mm3				\n\t"
		"psubusb %%mm5, %%mm4				\n\t"
		"por %%mm4, %%mm3				\n\t" // |l5 - l6|
		PAVGB(%%mm3, %%mm0)				      // (|l2 - l3| + |l5 - l6|)/2
		"movq %%mm2, %%mm1				\n\t" // line 4
		"psubusb %%mm5, %%mm2				\n\t"
		"movq %%mm2, %%mm4				\n\t"
		"pcmpeqb %%mm7, %%mm2				\n\t" // (l4 - l5) <= 0 ? -1 : 0
		"psubusb %%mm1, %%mm5				\n\t"
		"por %%mm5, %%mm4				\n\t" // |l4 - l5|
		"psubusb %%mm0, %%mm4				\n\t" //d = MAX(0, |l4-l5| - (|l2-l3| + |l5-l6|)/2)
		"movq %%mm4, %%mm3				\n\t" // d
		"psubusb pQPb, %%mm4				\n\t"
		"pcmpeqb %%mm7, %%mm4				\n\t" // d <= QP ? -1 : 0
		"psubusb b01, %%mm3				\n\t"
		"pand %%mm4, %%mm3				\n\t" // d <= QP ? d : 0
		/* apply 3d/8 to l4/l5, d/4 to l3/l6, d/8 to l2/l7, signed via the
		 * pxor-with-mask trick (mm2 holds the sign mask of l4-l5) */
		PAVGB(%%mm7, %%mm3)				      // d/2
		"movq %%mm3, %%mm1				\n\t" // d/2
		PAVGB(%%mm7, %%mm3)				      // d/4
		PAVGB(%%mm1, %%mm3)				      // 3*d/8
		"movq (%0, %1, 4), %%mm0			\n\t" // line 4
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
		"psubusb %%mm3, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%0, %1, 4)			\n\t" // line 4
		"movq (%%ebx), %%mm0				\n\t" // line 5
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
		"paddusb %%mm3, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%ebx)				\n\t" // line 5
		PAVGB(%%mm7, %%mm1)				      // d/4
		"movq (%%eax, %1, 2), %%mm0			\n\t" // line 3
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
		"psubusb %%mm1, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%eax, %1, 2)			\n\t" // line 3
		"movq (%%ebx, %1), %%mm0			\n\t" // line 6
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
		"paddusb %%mm1, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%ebx, %1)			\n\t" // line 6
		PAVGB(%%mm7, %%mm1)				      // d/8
		"movq (%%eax, %1), %%mm0			\n\t" // line 2
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
		"psubusb %%mm1, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%eax, %1)			\n\t" // line 2
		"movq (%%ebx, %1, 2), %%mm0			\n\t" // line 7
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
		"paddusb %%mm1, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%ebx, %1, 2)			\n\t" // line 7
		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
//	const int l8= stride + l7;
//	const int l9= stride + l8;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		int a= src[l3] - src[l4];
		int b= src[l4] - src[l5];
		int c= src[l5] - src[l6];
		/* boundary step strength minus average neighbour activity */
		int d= ABS(b) - ((ABS(a) + ABS(c))>>1);
		d= MAX(d, 0);
		if(d < QP)
		{
			int v = d * SIGN(-b);
			src[l2] +=v>>3;
			src[l3] +=v>>2;
			src[l4] +=(3*v)>>3;
			src[l5] -=(3*v)>>3;
			src[l6] -=v>>2;
			src[l7] -=v>>3;
		}
		src++;
	}
/*
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;
	for(int x=0; x<BLOCK_SIZE; x++)
	{
		int v2= src[l2];
		int v3= src[l3];
		int v4= src[l4];
		int v5= src[l5];
		int v6= src[l6];
		int v7= src[l7];
		if(ABS(v4-v5)<QP &&  ABS(v4-v5) - (ABS(v3-v4) + ABS(v5-v6))>0 )
		{
			src[l3] = (6*v2 + 4*v3 + 3*v4 + 2*v5 + v6         )/16;
			src[l4] = (3*v2 + 3*v3 + 4*v4 + 3*v5 + 2*v6 + v7  )/16;
			src[l5] = (1*v2 + 2*v3 + 3*v4 + 4*v5 + 3*v6 + 3*v7)/16;
			src[l6] = (       1*v3 + 2*v4 + 3*v5 + 4*v6 + 6*v7)/16;
		}
		src++;
	}
*/
#endif
}
  776. /**
  777. * Experimental Filter 1 (Horizontal)
  778. * will not damage linear gradients
  779. * Flat blocks should look like they where passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
  780. * can only smooth blocks at the expected locations (it cant smooth them if they did move)
  781. * MMX2 version does correct clipping C version doesnt
  782. * not identical with the vertical one
  783. */
  784. static inline void horizX1Filter(uint8_t *src, int stride, int QP)
  785. {
  786. int y;
  787. static uint64_t *lut= NULL;
  788. if(lut==NULL)
  789. {
  790. int i;
  791. lut= (uint64_t*)memalign(8, 256*8);
  792. for(i=0; i<256; i++)
  793. {
  794. int v= i < 128 ? 2*i : 2*(i-256);
  795. /*
  796. //Simulate 112242211 9-Tap filter
  797. uint64_t a= (v/16) & 0xFF;
  798. uint64_t b= (v/8) & 0xFF;
  799. uint64_t c= (v/4) & 0xFF;
  800. uint64_t d= (3*v/8) & 0xFF;
  801. */
  802. //Simulate piecewise linear interpolation
  803. uint64_t a= (v/16) & 0xFF;
  804. uint64_t b= (v*3/16) & 0xFF;
  805. uint64_t c= (v*5/16) & 0xFF;
  806. uint64_t d= (7*v/16) & 0xFF;
  807. uint64_t A= (0x100 - a)&0xFF;
  808. uint64_t B= (0x100 - b)&0xFF;
  809. uint64_t C= (0x100 - c)&0xFF;
  810. uint64_t D= (0x100 - c)&0xFF;
  811. lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
  812. (D<<24) | (C<<16) | (B<<8) | (A);
  813. //lut[i] = (v<<32) | (v<<24);
  814. }
  815. }
  816. #if 0
  817. asm volatile(
  818. "pxor %%mm7, %%mm7 \n\t" // 0
  819. // "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
  820. "leal (%0, %1), %%eax \n\t"
  821. "leal (%%eax, %1, 4), %%ebx \n\t"
  822. "movq b80, %%mm6 \n\t"
  823. "movd pQPb, %%mm5 \n\t" // QP
  824. "movq %%mm5, %%mm4 \n\t"
  825. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  826. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  827. "pxor %%mm5, %%mm5 \n\t" // 0
  828. "psubb %%mm4, %%mm5 \n\t" // -3QP
  829. "por bm11111110, %%mm5 \n\t" // ...,FF,FF,-3QP
  830. "psllq $24, %%mm5 \n\t"
  831. // 0 1 2 3 4 5 6 7 8 9
  832. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  833. #define HX1old(a) \
  834. "movd " #a ", %%mm0 \n\t"\
  835. "movd 4" #a ", %%mm1 \n\t"\
  836. "punpckldq %%mm1, %%mm0 \n\t"\
  837. "movq %%mm0, %%mm1 \n\t"\
  838. "movq %%mm0, %%mm2 \n\t"\
  839. "psrlq $8, %%mm1 \n\t"\
  840. "psubusb %%mm1, %%mm2 \n\t"\
  841. "psubusb %%mm0, %%mm1 \n\t"\
  842. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  843. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  844. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  845. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  846. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  847. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  848. "paddb %%mm5, %%mm1 \n\t"\
  849. "psubusb %%mm5, %%mm1 \n\t"\
  850. PAVGB(%%mm7, %%mm1)\
  851. "pxor %%mm2, %%mm1 \n\t"\
  852. "psubb %%mm2, %%mm1 \n\t"\
  853. "psrlq $24, %%mm1 \n\t"\
  854. "movd %%mm1, %%ecx \n\t"\
  855. "paddb %%mm6, %%mm0 \n\t"\
  856. "paddsb (%3, %%ecx, 8), %%mm0 \n\t"\
  857. "paddb %%mm6, %%mm0 \n\t"\
  858. "movq %%mm0, " #a " \n\t"\
  859. /*
  860. HX1old((%0))
  861. HX1old((%%eax))
  862. HX1old((%%eax, %1))
  863. HX1old((%%eax, %1, 2))
  864. HX1old((%0, %1, 4))
  865. HX1old((%%ebx))
  866. HX1old((%%ebx, %1))
  867. HX1old((%%ebx, %1, 2))
  868. */
  869. //FIXME add some comments, its unreadable ...
  870. #define HX1b(a, c, b, d) \
  871. "movd " #a ", %%mm0 \n\t"\
  872. "movd 4" #a ", %%mm1 \n\t"\
  873. "punpckldq %%mm1, %%mm0 \n\t"\
  874. "movd " #b ", %%mm4 \n\t"\
  875. "movq %%mm0, %%mm1 \n\t"\
  876. "movq %%mm0, %%mm2 \n\t"\
  877. "psrlq $8, %%mm1 \n\t"\
  878. "movd 4" #b ", %%mm3 \n\t"\
  879. "psubusb %%mm1, %%mm2 \n\t"\
  880. "psubusb %%mm0, %%mm1 \n\t"\
  881. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  882. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  883. "punpckldq %%mm3, %%mm4 \n\t"\
  884. "movq %%mm1, %%mm3 \n\t"\
  885. "psllq $32, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  886. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  887. "paddb %%mm6, %%mm0 \n\t"\
  888. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  889. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  890. "movq %%mm4, %%mm3 \n\t"\
  891. "paddb %%mm5, %%mm1 \n\t"\
  892. "psubusb %%mm5, %%mm1 \n\t"\
  893. "psrlq $8, %%mm3 \n\t"\
  894. PAVGB(%%mm7, %%mm1)\
  895. "pxor %%mm2, %%mm1 \n\t"\
  896. "psubb %%mm2, %%mm1 \n\t"\
  897. "movq %%mm4, %%mm2 \n\t"\
  898. "psrlq $24, %%mm1 \n\t"\
  899. "psubusb %%mm3, %%mm2 \n\t"\
  900. "movd %%mm1, %%ecx \n\t"\
  901. "psubusb %%mm4, %%mm3 \n\t"\
  902. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  903. "por %%mm2, %%mm3 \n\t" /* p´x = |px - p(x+1)| */\
  904. "paddb %%mm6, %%mm0 \n\t"\
  905. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  906. "movq %%mm3, %%mm1 \n\t"\
  907. "psllq $32, %%mm1 \n\t" /* p´5 = |p1 - p2| */\
  908. "movq %%mm0, " #a " \n\t"\
  909. PAVGB(%%mm3, %%mm1) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  910. "paddb %%mm6, %%mm4 \n\t"\
  911. "psrlq $16, %%mm1 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  912. "psubusb %%mm1, %%mm3 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  913. "paddb %%mm5, %%mm3 \n\t"\
  914. "psubusb %%mm5, %%mm3 \n\t"\
  915. PAVGB(%%mm7, %%mm3)\
  916. "pxor %%mm2, %%mm3 \n\t"\
  917. "psubb %%mm2, %%mm3 \n\t"\
  918. "psrlq $24, %%mm3 \n\t"\
  919. "movd " #c ", %%mm0 \n\t"\
  920. "movd 4" #c ", %%mm1 \n\t"\
  921. "punpckldq %%mm1, %%mm0 \n\t"\
  922. "paddb %%mm6, %%mm0 \n\t"\
  923. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  924. "paddb %%mm6, %%mm0 \n\t"\
  925. "movq %%mm0, " #c " \n\t"\
  926. "movd %%mm3, %%ecx \n\t"\
  927. "movd " #d ", %%mm0 \n\t"\
  928. "paddsb (%2, %%ecx, 8), %%mm4 \n\t"\
  929. "movd 4" #d ", %%mm1 \n\t"\
  930. "paddb %%mm6, %%mm4 \n\t"\
  931. "punpckldq %%mm1, %%mm0 \n\t"\
  932. "movq %%mm4, " #b " \n\t"\
  933. "paddb %%mm6, %%mm0 \n\t"\
  934. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  935. "paddb %%mm6, %%mm0 \n\t"\
  936. "movq %%mm0, " #d " \n\t"\
  937. HX1b((%0),(%%eax),(%%eax, %1),(%%eax, %1, 2))
  938. HX1b((%0, %1, 4),(%%ebx),(%%ebx, %1),(%%ebx, %1, 2))
  939. :
  940. : "r" (src), "r" (stride), "r" (lut)
  941. : "%eax", "%ebx", "%ecx"
  942. );
  943. #else
  944. //FIXME (has little in common with the mmx2 version)
  945. for(y=0; y<BLOCK_SIZE; y++)
  946. {
  947. int a= src[1] - src[2];
  948. int b= src[3] - src[4];
  949. int c= src[5] - src[6];
  950. int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
  951. if(d < QP)
  952. {
  953. int v = d * SIGN(-b);
  954. src[1] +=v/8;
  955. src[2] +=v/4;
  956. src[3] +=3*v/8;
  957. src[4] -=3*v/8;
  958. src[5] -=v/4;
  959. src[6] -=v/8;
  960. }
  961. src+=stride;
  962. }
  963. #endif
  964. }
/**
 * Vertical default (de-blocking) filter.
 * Operates on the horizontal block edge between rows l4 and l5 of an 8-pixel
 * wide column strip: when the "middle energy" across the edge is below 8*QP,
 * a clipped correction d is subtracted from row l4 and added to row l5
 * (see the C reference implementation in the #else branch).
 *
 * @param src    pointer above the block edge; the code advances it so that
 *               rows l1..l8 straddle the edge
 * @param stride distance in bytes between vertically adjacent pixels
 * @param QP     quantization parameter controlling the 8*QP threshold
 *
 * Three build variants: MMX2/3DNOW asm, plain MMX asm, and portable C.
 * NOTE(review): the asm variants read the globals pQPb/b80/b00/b01/w05/w20
 * and temp0..temp3 declared elsewhere in this file.
 */
static inline void doVertDefFilter(uint8_t src[], int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
/*
	uint8_t tmp[16];
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= (int)tmp - (int)src - stride*3;
	const int l5= (int)tmp - (int)src - stride*3 + 8;
	const int l6= stride*3 + l3;
	const int l7= stride + l6;
	const int l8= stride + l7;

	memcpy(tmp, src+stride*7, 8);
	memcpy(tmp+8, src+stride*8, 8);
*/
	src+= stride*4;
	asm volatile(

#if 0 //sligtly more accurate and slightly slower
		"pxor %%mm7, %%mm7 \n\t" // 0
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ebx+%1 ebx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1

		// lenergy = |2l0 - 5l1 + 5l2 - 2l3| approximated with packed averages
		"movq (%0, %1, 2), %%mm0 \n\t" // l2
		"movq (%0), %%mm1 \n\t" // l0
		"movq %%mm0, %%mm2 \n\t" // l2
		PAVGB(%%mm7, %%mm0) // ~l2/2
		PAVGB(%%mm1, %%mm0) // ~(l2 + 2l0)/4
		PAVGB(%%mm2, %%mm0) // ~(5l2 + 2l0)/8
		"movq (%%eax), %%mm1 \n\t" // l1
		"movq (%%eax, %1, 2), %%mm3 \n\t" // l3
		"movq %%mm1, %%mm4 \n\t" // l1
		PAVGB(%%mm7, %%mm1) // ~l1/2
		PAVGB(%%mm3, %%mm1) // ~(l1 + 2l3)/4
		PAVGB(%%mm4, %%mm1) // ~(5l1 + 2l3)/8
		"movq %%mm0, %%mm4 \n\t" // ~(5l2 + 2l0)/8
		"psubusb %%mm1, %%mm0 \n\t"
		"psubusb %%mm4, %%mm1 \n\t"
		"por %%mm0, %%mm1 \n\t" // ~|2l0 - 5l1 + 5l2 - 2l3|/8
// mm1= |lenergy|, mm2= l2, mm3= l3, mm7=0

		// menergy = |2l2 - 5l3 + 5l4 - 2l5| (the edge itself)
		"movq (%0, %1, 4), %%mm0 \n\t" // l4
		"movq %%mm0, %%mm4 \n\t" // l4
		PAVGB(%%mm7, %%mm0) // ~l4/2
		PAVGB(%%mm2, %%mm0) // ~(l4 + 2l2)/4
		PAVGB(%%mm4, %%mm0) // ~(5l4 + 2l2)/8
		"movq (%%ebx), %%mm2 \n\t" // l5
		"movq %%mm3, %%mm5 \n\t" // l3
		PAVGB(%%mm7, %%mm3) // ~l3/2
		PAVGB(%%mm2, %%mm3) // ~(l3 + 2l5)/4
		PAVGB(%%mm5, %%mm3) // ~(5l3 + 2l5)/8
		"movq %%mm0, %%mm6 \n\t" // ~(5l4 + 2l2)/8
		"psubusb %%mm3, %%mm0 \n\t"
		"psubusb %%mm6, %%mm3 \n\t"
		"por %%mm0, %%mm3 \n\t" // ~|2l2 - 5l3 + 5l4 - 2l5|/8
		"pcmpeqb %%mm7, %%mm0 \n\t" // SIGN(2l2 - 5l3 + 5l4 - 2l5)
// mm0= SIGN(menergy), mm1= |lenergy|, mm2= l5, mm3= |menergy|, mm4=l4, mm5= l3, mm7=0

		// renergy = |2l4 - 5l5 + 5l6 - 2l7|
		"movq (%%ebx, %1), %%mm6 \n\t" // l6
		"movq %%mm6, %%mm5 \n\t" // l6
		PAVGB(%%mm7, %%mm6) // ~l6/2
		PAVGB(%%mm4, %%mm6) // ~(l6 + 2l4)/4
		PAVGB(%%mm5, %%mm6) // ~(5l6 + 2l4)/8
		"movq (%%ebx, %1, 2), %%mm5 \n\t" // l7
		"movq %%mm2, %%mm4 \n\t" // l5
		PAVGB(%%mm7, %%mm2) // ~l5/2
		PAVGB(%%mm5, %%mm2) // ~(l5 + 2l7)/4
		PAVGB(%%mm4, %%mm2) // ~(5l5 + 2l7)/8
		"movq %%mm6, %%mm4 \n\t" // ~(5l6 + 2l4)/8
		"psubusb %%mm2, %%mm6 \n\t"
		"psubusb %%mm4, %%mm2 \n\t"
		"por %%mm6, %%mm2 \n\t" // ~|2l4 - 5l5 + 5l6 - 2l7|/8
// mm0= SIGN(menergy), mm1= |lenergy|/8, mm2= |renergy|/8, mm3= |menergy|/8, mm7=0

		// d = max(|menergy| - min(|lenergy|,|renergy|), 0), gated by |menergy| < QP
		PMINUB(%%mm2, %%mm1, %%mm4) // MIN(|lenergy|,|renergy|)/8
		"movq pQPb, %%mm4 \n\t" // QP //FIXME QP+1 ?
		"paddusb b01, %%mm4 \n\t"
		"pcmpgtb %%mm3, %%mm4 \n\t" // |menergy|/8 < QP
		"psubusb %%mm1, %%mm3 \n\t" // d=|menergy|/8-MIN(|lenergy|,|renergy|)/8
		"pand %%mm4, %%mm3 \n\t"
		"movq %%mm3, %%mm1 \n\t"
// "psubusb b01, %%mm3 \n\t"
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm7, %%mm3)
		"paddusb %%mm1, %%mm3 \n\t"
// "paddusb b01, %%mm3 \n\t"

		// clip the correction to |l3-l4| and apply it across the edge
		"movq (%%eax, %1, 2), %%mm6 \n\t" //l3
		"movq (%0, %1, 4), %%mm5 \n\t" //l4
		"movq (%0, %1, 4), %%mm4 \n\t" //l4
		"psubusb %%mm6, %%mm5 \n\t"
		"psubusb %%mm4, %%mm6 \n\t"
		"por %%mm6, %%mm5 \n\t" // |l3-l4|
		"pcmpeqb %%mm7, %%mm6 \n\t" // SIGN(l3-l4)
		"pxor %%mm6, %%mm0 \n\t"
		"pand %%mm0, %%mm3 \n\t"
		PMINUB(%%mm5, %%mm3, %%mm0)
		"psubusb b01, %%mm3 \n\t"
		PAVGB(%%mm7, %%mm3)
		"movq (%%eax, %1, 2), %%mm0 \n\t"
		"movq (%0, %1, 4), %%mm2 \n\t"
		"pxor %%mm6, %%mm0 \n\t"
		"pxor %%mm6, %%mm2 \n\t"
		"psubb %%mm3, %%mm0 \n\t"
		"paddb %%mm3, %%mm2 \n\t"
		"pxor %%mm6, %%mm0 \n\t"
		"pxor %%mm6, %%mm2 \n\t"
		"movq %%mm0, (%%eax, %1, 2) \n\t"
		"movq %%mm2, (%0, %1, 4) \n\t"
#endif

		// active variant: energies computed as signed offsets around 128
		"leal (%0, %1), %%eax \n\t"
		"pcmpeqb %%mm6, %%mm6 \n\t" // -1
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ebx+%1 ebx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1

		"movq (%%eax, %1, 2), %%mm1 \n\t" // l3
		"movq (%0, %1, 4), %%mm0 \n\t" // l4
		"pxor %%mm6, %%mm1 \n\t" // -l3-1
		PAVGB(%%mm1, %%mm0) // -q+128 = (l4-l3+256)/2
// mm1=-l3-1, mm0=128-q

		"movq (%%eax, %1, 4), %%mm2 \n\t" // l5
		"movq (%%eax, %1), %%mm3 \n\t" // l2
		"pxor %%mm6, %%mm2 \n\t" // -l5-1
		"movq %%mm2, %%mm5 \n\t" // -l5-1
		"movq b80, %%mm4 \n\t" // 128
		"leal (%%eax, %1, 4), %%ebx \n\t"
		PAVGB(%%mm3, %%mm2) // (l2-l5+256)/2
		PAVGB(%%mm0, %%mm4) // ~(l4-l3)/4 + 128
		PAVGB(%%mm2, %%mm4) // ~(l2-l5)/4 +(l4-l3)/8 + 128
		PAVGB(%%mm0, %%mm4) // ~(l2-l5)/8 +5(l4-l3)/16 + 128
// mm1=-l3-1, mm0=128-q, mm3=l2, mm4=menergy/16 + 128, mm5= -l5-1

		"movq (%%eax), %%mm2 \n\t" // l1
		"pxor %%mm6, %%mm2 \n\t" // -l1-1
		PAVGB(%%mm3, %%mm2) // (l2-l1+256)/2
		PAVGB((%0), %%mm1) // (l0-l3+256)/2
		"movq b80, %%mm3 \n\t" // 128
		PAVGB(%%mm2, %%mm3) // ~(l2-l1)/4 + 128
		PAVGB(%%mm1, %%mm3) // ~(l0-l3)/4 +(l2-l1)/8 + 128
		PAVGB(%%mm2, %%mm3) // ~(l0-l3)/8 +5(l2-l1)/16 + 128
// mm0=128-q, mm3=lenergy/16 + 128, mm4= menergy/16 + 128, mm5= -l5-1

		PAVGB((%%ebx, %1), %%mm5) // (l6-l5+256)/2
		"movq (%%ebx, %1, 2), %%mm1 \n\t" // l7
		"pxor %%mm6, %%mm1 \n\t" // -l7-1
		PAVGB((%0, %1, 4), %%mm1) // (l4-l7+256)/2
		"movq b80, %%mm2 \n\t" // 128
		PAVGB(%%mm5, %%mm2) // ~(l6-l5)/4 + 128
		PAVGB(%%mm1, %%mm2) // ~(l4-l7)/4 +(l6-l5)/8 + 128
		PAVGB(%%mm5, %%mm2) // ~(l4-l7)/8 +5(l6-l5)/16 + 128
// mm0=128-q, mm2=renergy/16 + 128, mm3=lenergy/16 + 128, mm4= menergy/16 + 128

		"movq b00, %%mm1 \n\t" // 0
		"movq b00, %%mm5 \n\t" // 0
		"psubb %%mm2, %%mm1 \n\t" // 128 - renergy/16
		"psubb %%mm3, %%mm5 \n\t" // 128 - lenergy/16
		PMAXUB(%%mm1, %%mm2) // 128 + |renergy/16|
		PMAXUB(%%mm5, %%mm3) // 128 + |lenergy/16|
		PMINUB(%%mm2, %%mm3, %%mm1) // 128 + MIN(|lenergy|,|renergy|)/16
// mm0=128-q, mm3=128 + MIN(|lenergy|,|renergy|)/16, mm4= menergy/16 + 128

		"movq b00, %%mm7 \n\t" // 0
		"movq pQPb, %%mm2 \n\t" // QP
		PAVGB(%%mm6, %%mm2) // 128 + QP/2
		"psubb %%mm6, %%mm2 \n\t"
		"movq %%mm4, %%mm1 \n\t"
		"pcmpgtb %%mm7, %%mm1 \n\t" // SIGN(menergy)
		"pxor %%mm1, %%mm4 \n\t"
		"psubb %%mm1, %%mm4 \n\t" // 128 + |menergy|/16
		"pcmpgtb %%mm4, %%mm2 \n\t" // |menergy|/16 < QP/2
		"psubusb %%mm3, %%mm4 \n\t" //d=|menergy|/16 - MIN(|lenergy|,|renergy|)/16
// mm0=128-q, mm1= SIGN(menergy), mm2= |menergy|/16 < QP/2, mm4= d/16

		// scale d by 5/4 (i.e. (5*d+32)>>6 of the C reference) and clip to |q|
		"movq %%mm4, %%mm3 \n\t" // d
		"psubusb b01, %%mm4 \n\t"
		PAVGB(%%mm7, %%mm4) // d/32
		PAVGB(%%mm7, %%mm4) // (d + 32)/64
		"paddb %%mm3, %%mm4 \n\t" // 5d/64
		"pand %%mm2, %%mm4 \n\t"

		"movq b80, %%mm5 \n\t" // 128
		"psubb %%mm0, %%mm5 \n\t" // q
		"paddsb %%mm6, %%mm5 \n\t" // fix bad rounding
		"pcmpgtb %%mm5, %%mm7 \n\t" // SIGN(q)
		"pxor %%mm7, %%mm5 \n\t"

		PMINUB(%%mm5, %%mm4, %%mm3) // MIN(|q|, 5d/64)
		"pxor %%mm1, %%mm7 \n\t" // SIGN(d*q)

		"pand %%mm7, %%mm4 \n\t"
		"movq (%%eax, %1, 2), %%mm0 \n\t"
		"movq (%0, %1, 4), %%mm2 \n\t"
		"pxor %%mm1, %%mm0 \n\t"
		"pxor %%mm1, %%mm2 \n\t"
		"paddb %%mm4, %%mm0 \n\t"
		"psubb %%mm4, %%mm2 \n\t"
		"pxor %%mm1, %%mm0 \n\t"
		"pxor %%mm1, %%mm2 \n\t"
		"movq %%mm0, (%%eax, %1, 2) \n\t"
		"movq %%mm2, (%0, %1, 4) \n\t"

		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
/*
	{
	int x;
	src-= stride;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
		if(ABS(middleEnergy)< 8*QP)
		{
			const int q=(src[l4] - src[l5])/2;
			const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
			const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);

			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
			d= MAX(d, 0);

			d= (5*d + 32) >> 6;
			d*= SIGN(-middleEnergy);

			if(q>0)
			{
				d= d<0 ? 0 : d;
				d= d>q ? q : d;
			}
			else
			{
				d= d>0 ? 0 : d;
				d= d<q ? q : d;
			}

			src[l4]-= d;
			src[l5]+= d;
		}
		src++;
	}
	src-=8;
	for(x=0; x<8; x++)
	{
		int y;
		for(y=4; y<6; y++)
		{
			int d= src[x+y*stride] - tmp[x+(y-4)*8];
			int ad= ABS(d);
			static int max=0;
			static int sum=0;
			static int num=0;
			static int bias=0;

			if(max<ad) max=ad;
			sum+= ad>3 ? 1 : 0;
			if(ad>3)
			{
				src[0] = src[7] = src[stride*7] = src[(stride+1)*7]=255;
			}
			if(y==4) bias+=d;
			num++;
			if(num%1000000 == 0)
			{
				printf(" %d %d %d %d\n", num, sum, max, bias);
			}
		}
	}
}
*/
#elif defined (HAVE_MMX)
	// plain-MMX variant: exact 16-bit arithmetic on unpacked low/high halves
	src+= stride*4;
	asm volatile(
		"pxor %%mm7, %%mm7 \n\t"
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ebx+%1 ebx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1

		// lenergy = 2L0 - 5L1 + 5L2 - 2L3 (low/high word lanes in mm0/mm1)
		"movq (%0), %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
		"punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0

		"movq (%%eax), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
		"punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1

		"movq (%%eax, %1), %%mm4 \n\t"
		"movq %%mm4, %%mm5 \n\t"
		"punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
		"punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2

		"paddw %%mm0, %%mm0 \n\t" // 2L0
		"paddw %%mm1, %%mm1 \n\t" // 2H0
		"psubw %%mm4, %%mm2 \n\t" // L1 - L2
		"psubw %%mm5, %%mm3 \n\t" // H1 - H2
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2

		"psllw $2, %%mm2 \n\t" // 4L1 - 4L2
		"psllw $2, %%mm3 \n\t" // 4H1 - 4H2
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2

		"movq (%%eax, %1, 2), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // L3
		"punpckhbw %%mm7, %%mm3 \n\t" // H3

		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
		"movq %%mm0, temp0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq %%mm1, temp1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3

		// menergy = 2L2 - 5L3 + 5L4 - 2L5
		"movq (%0, %1, 4), %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"punpcklbw %%mm7, %%mm0 \n\t" // L4
		"punpckhbw %%mm7, %%mm1 \n\t" // H4

		"psubw %%mm0, %%mm2 \n\t" // L3 - L4
		"psubw %%mm1, %%mm3 \n\t" // H3 - H4
		"movq %%mm2, temp2 \n\t" // L3 - L4
		"movq %%mm3, temp3 \n\t" // H3 - H4
		"paddw %%mm4, %%mm4 \n\t" // 2L2
		"paddw %%mm5, %%mm5 \n\t" // 2H2
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4

		"psllw $2, %%mm2 \n\t" // 4L3 - 4L4
		"psllw $2, %%mm3 \n\t" // 4H3 - 4H4
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
//50 opcodes so far
		// renergy = 2L4 - 5L5 + 5L6 - 2L7
		"movq (%%ebx), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // L5
		"punpckhbw %%mm7, %%mm3 \n\t" // H5
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5

		"movq (%%ebx, %1), %%mm6 \n\t"
		"punpcklbw %%mm7, %%mm6 \n\t" // L6
		"psubw %%mm6, %%mm2 \n\t" // L5 - L6
		"movq (%%ebx, %1), %%mm6 \n\t"
		"punpckhbw %%mm7, %%mm6 \n\t" // H6
		"psubw %%mm6, %%mm3 \n\t" // H5 - H6

		"paddw %%mm0, %%mm0 \n\t" // 2L4
		"paddw %%mm1, %%mm1 \n\t" // 2H4
		"psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
		"psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6

		"psllw $2, %%mm2 \n\t" // 4L5 - 4L6
		"psllw $2, %%mm3 \n\t" // 4H5 - 4H6
		"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
		"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6

		"movq (%%ebx, %1, 2), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // L7
		"punpckhbw %%mm7, %%mm3 \n\t" // H7

		"paddw %%mm2, %%mm2 \n\t" // 2L7
		"paddw %%mm3, %%mm3 \n\t" // 2H7
		"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
		"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7

		"movq temp0, %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq temp1, %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3

		// absolute values of the three energies
#ifdef HAVE_MMX2
		"movq %%mm7, %%mm6 \n\t" // 0
		"psubw %%mm0, %%mm6 \n\t"
		"pmaxsw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
		"movq %%mm7, %%mm6 \n\t" // 0
		"psubw %%mm1, %%mm6 \n\t"
		"pmaxsw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
		"movq %%mm7, %%mm6 \n\t" // 0
		"psubw %%mm2, %%mm6 \n\t"
		"pmaxsw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
		"movq %%mm7, %%mm6 \n\t" // 0
		"psubw %%mm3, %%mm6 \n\t"
		"pmaxsw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#else
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm0, %%mm6 \n\t"
		"pxor %%mm6, %%mm0 \n\t"
		"psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm1, %%mm6 \n\t"
		"pxor %%mm6, %%mm1 \n\t"
		"psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm2, %%mm6 \n\t"
		"pxor %%mm6, %%mm2 \n\t"
		"psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm3, %%mm6 \n\t"
		"pxor %%mm6, %%mm3 \n\t"
		"psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#endif

		// min(|lenergy|, |renergy|)
#ifdef HAVE_MMX2
		"pminsw %%mm2, %%mm0 \n\t"
		"pminsw %%mm3, %%mm1 \n\t"
#else
		"movq %%mm0, %%mm6 \n\t"
		"psubusw %%mm2, %%mm6 \n\t"
		"psubw %%mm6, %%mm0 \n\t"
		"movq %%mm1, %%mm6 \n\t"
		"psubusw %%mm3, %%mm6 \n\t"
		"psubw %%mm6, %%mm1 \n\t"
#endif

		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
		"pxor %%mm6, %%mm4 \n\t"
		"psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
		"pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
		"pxor %%mm7, %%mm5 \n\t"
		"psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes
		// d = (5*(|menergy| - min) + 32) >> 6, gated by |menergy| < 8*QP
		"movd %2, %%mm2 \n\t" // QP
		"punpcklwd %%mm2, %%mm2 \n\t"
		"punpcklwd %%mm2, %%mm2 \n\t"
		"psllw $3, %%mm2 \n\t" // 8QP
		"movq %%mm2, %%mm3 \n\t" // 8QP
		"pcmpgtw %%mm4, %%mm2 \n\t"
		"pcmpgtw %%mm5, %%mm3 \n\t"
		"pand %%mm2, %%mm4 \n\t"
		"pand %%mm3, %%mm5 \n\t"

		"psubusw %%mm0, %%mm4 \n\t" // hd
		"psubusw %%mm1, %%mm5 \n\t" // ld

		"movq w05, %%mm2 \n\t" // 5
		"pmullw %%mm2, %%mm4 \n\t"
		"pmullw %%mm2, %%mm5 \n\t"
		"movq w20, %%mm2 \n\t" // 32
		"paddw %%mm2, %%mm4 \n\t"
		"paddw %%mm2, %%mm5 \n\t"
		"psrlw $6, %%mm4 \n\t"
		"psrlw $6, %%mm5 \n\t"

/*
		"movq w06, %%mm2 \n\t" // 6
		"paddw %%mm2, %%mm4 \n\t"
		"paddw %%mm2, %%mm5 \n\t"
		"movq w1400, %%mm2 \n\t" // 1400h = 5120 = 5/64*2^16
//FIXME if *5/64 is supposed to be /13 then we should use 5041 instead of 5120
		"pmulhw %%mm2, %%mm4 \n\t" // hd/13
		"pmulhw %%mm2, %%mm5 \n\t" // ld/13
*/

		// clip d to |L3-L4|/2 and apply with the sign of (L3-L4)
		"movq temp2, %%mm0 \n\t" // L3 - L4
		"movq temp3, %%mm1 \n\t" // H3 - H4

		"pxor %%mm2, %%mm2 \n\t"
		"pxor %%mm3, %%mm3 \n\t"

		"pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
		"pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
		"pxor %%mm2, %%mm0 \n\t"
		"pxor %%mm3, %%mm1 \n\t"
		"psubw %%mm2, %%mm0 \n\t" // |L3-L4|
		"psubw %%mm3, %%mm1 \n\t" // |H3-H4|
		"psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
		"psrlw $1, %%mm1 \n\t" // |H3 - H4|/2

		"pxor %%mm6, %%mm2 \n\t"
		"pxor %%mm7, %%mm3 \n\t"
		"pand %%mm2, %%mm4 \n\t"
		"pand %%mm3, %%mm5 \n\t"

#ifdef HAVE_MMX2
		"pminsw %%mm0, %%mm4 \n\t"
		"pminsw %%mm1, %%mm5 \n\t"
#else
		"movq %%mm4, %%mm2 \n\t"
		"psubusw %%mm0, %%mm2 \n\t"
		"psubw %%mm2, %%mm4 \n\t"
		"movq %%mm5, %%mm2 \n\t"
		"psubusw %%mm1, %%mm2 \n\t"
		"psubw %%mm2, %%mm5 \n\t"
#endif
		"pxor %%mm6, %%mm4 \n\t"
		"pxor %%mm7, %%mm5 \n\t"
		"psubw %%mm6, %%mm4 \n\t"
		"psubw %%mm7, %%mm5 \n\t"
		"packsswb %%mm5, %%mm4 \n\t"
		"movq (%%eax, %1, 2), %%mm0 \n\t"
		"paddb %%mm4, %%mm0 \n\t"
		"movq %%mm0, (%%eax, %1, 2) \n\t"
		"movq (%0, %1, 4), %%mm0 \n\t"
		"psubb %%mm4, %%mm0 \n\t"
		"movq %%mm0, (%0, %1, 4) \n\t"

		:
		: "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
	// portable C reference implementation
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
// const int l9= stride + l8;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
		if(ABS(middleEnergy) < 8*QP)
		{
			const int q=(src[l4] - src[l5])/2;
			const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
			const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);

			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
			d= MAX(d, 0);

			d= (5*d + 32) >> 6;
			d*= SIGN(-middleEnergy);

			// clip the correction so it never crosses the midpoint q
			if(q>0)
			{
				d= d<0 ? 0 : d;
				d= d>q ? q : d;
			}
			else
			{
				d= d>0 ? 0 : d;
				d= d<q ? q : d;
			}

			src[l4]-= d;
			src[l5]+= d;
		}
		src++;
	}
#endif
}
  1469. //FIXME? |255-0| = 1
  1470. /**
  1471. * Check if the given 8x8 Block is mostly "flat"
  1472. */
/**
 * Check if the given 8x8 block is mostly "flat" in the horizontal direction.
 * Counts neighbouring-pixel pairs within each row whose difference is in
 * [-1, 1] and compares the count against hFlatnessThreshold.
 *
 * @param src    top-left pixel of the 8x8 block
 * @param stride distance in bytes between vertically adjacent pixels
 * @return non-zero if the block is flat enough for the low-pass path
 *
 * NOTE: only the C implementation below is active; the asm variant is
 * disabled with #if 0.
 */
static inline int isHorizDC(uint8_t src[], int stride)
{
// src++;
	int numEq= 0;
#if 0
	asm volatile (
// "int $3 \n\t"
		"leal (%1, %2), %%ecx \n\t"
		"leal (%%ecx, %2, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %1 ecx ecx+%2 ecx+2%2 %1+4%2 ebx ebx+%2 ebx+2%2 %1+8%2 ebx+4%2
		"movq b7E, %%mm7 \n\t" // mm7 = 0x7F
		"movq b7C, %%mm6 \n\t" // mm6 = 0x7D
		"pxor %%mm0, %%mm0 \n\t"
		"movl %1, %%eax \n\t"
		"andl $0x1F, %%eax \n\t"
		"cmpl $24, %%eax \n\t"
		"leal tempBlock, %%eax \n\t"
		"jb 1f \n\t"

#define HDC_CHECK_AND_CPY(src, dst) \
		"movd " #src ", %%mm2 \n\t"\
		"punpckldq 4" #src ", %%mm2 \n\t" /* (%1) */\
		"movq %%mm2, %%mm1 \n\t"\
		"psrlq $8, %%mm2 \n\t"\
		"psubb %%mm1, %%mm2 \n\t"\
		"paddb %%mm7, %%mm2 \n\t"\
		"pcmpgtb %%mm6, %%mm2 \n\t"\
		"paddb %%mm2, %%mm0 \n\t"\
		"movq %%mm1," #dst "(%%eax) \n\t"

		HDC_CHECK_AND_CPY((%1),0)
		HDC_CHECK_AND_CPY((%%ecx),8)
		HDC_CHECK_AND_CPY((%%ecx, %2),16)
		HDC_CHECK_AND_CPY((%%ecx, %2, 2),24)
		HDC_CHECK_AND_CPY((%1, %2, 4),32)
		HDC_CHECK_AND_CPY((%%ebx),40)
		HDC_CHECK_AND_CPY((%%ebx, %2),48)
		HDC_CHECK_AND_CPY((%%ebx, %2, 2),56)
		"jmp 2f \n\t"
		"1: \n\t"
// src does not cross a 32 byte cache line so dont waste time with alignment
#define HDC_CHECK_AND_CPY2(src, dst) \
		"movq " #src ", %%mm2 \n\t"\
		"movq " #src ", %%mm1 \n\t"\
		"psrlq $8, %%mm2 \n\t"\
		"psubb %%mm1, %%mm2 \n\t"\
		"paddb %%mm7, %%mm2 \n\t"\
		"pcmpgtb %%mm6, %%mm2 \n\t"\
		"paddb %%mm2, %%mm0 \n\t"\
		"movq %%mm1," #dst "(%%eax) \n\t"

		HDC_CHECK_AND_CPY2((%1),0)
		HDC_CHECK_AND_CPY2((%%ecx),8)
		HDC_CHECK_AND_CPY2((%%ecx, %2),16)
		HDC_CHECK_AND_CPY2((%%ecx, %2, 2),24)
		HDC_CHECK_AND_CPY2((%1, %2, 4),32)
		HDC_CHECK_AND_CPY2((%%ebx),40)
		HDC_CHECK_AND_CPY2((%%ebx, %2),48)
		HDC_CHECK_AND_CPY2((%%ebx, %2, 2),56)
		"2: \n\t"
		"psllq $8, %%mm0 \n\t" // remove dummy value
		"movq %%mm0, %%mm1 \n\t"
		"psrlw $8, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $16, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $32, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movd %%mm0, %0 \n\t"
		: "=r" (numEq)
		: "r" (src), "r" (stride)
		: "%eax", "%ebx", "%ecx"
	);
// printf("%d\n", numEq);
	numEq= (256 - numEq) &0xFF;
#else
	int y;
	for(y=0; y<BLOCK_SIZE; y++)
	{
		// (diff + 1) & 0xFFFF < 3 is true exactly when diff is in {-1, 0, 1}
		if(((src[0] - src[1] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[1] - src[2] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[2] - src[3] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[3] - src[4] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[4] - src[5] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[5] - src[6] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[6] - src[7] + 1) & 0xFFFF) < 3) numEq++;
		src+= stride;
	}
#endif
/* if(abs(numEq - asmEq) > 0)
	{
// printf("\nasm:%d c:%d\n", asmEq, numEq);
		for(int y=0; y<8; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", src[x + y*stride]);
			}
			printf("\n");
		}
	}
*/
// printf("%d\n", numEq);
	return numEq > hFlatnessThreshold;
}
  1578. static inline int isHorizMinMaxOk(uint8_t src[], int stride, int QP)
  1579. {
  1580. if(abs(src[0] - src[7]) > 2*QP) return 0;
  1581. return 1;
  1582. }
  1583. static inline void doHorizDefFilter(uint8_t dst[], int stride, int QP)
  1584. {
  1585. #if 0
  1586. asm volatile(
  1587. "leal (%0, %1), %%ecx \n\t"
  1588. "leal (%%ecx, %1, 4), %%ebx \n\t"
  1589. // 0 1 2 3 4 5 6 7 8 9
  1590. // %0 ecx ecx+%1 ecx+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1591. "pxor %%mm7, %%mm7 \n\t"
  1592. "movq bm00001000, %%mm6 \n\t"
  1593. "movd %2, %%mm5 \n\t" // QP
  1594. "movq %%mm5, %%mm4 \n\t"
  1595. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  1596. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  1597. "psllq $24, %%mm4 \n\t"
  1598. "pxor %%mm5, %%mm5 \n\t" // 0
  1599. "psubb %%mm4, %%mm5 \n\t" // -QP
  1600. "leal tempBlock, %%eax \n\t"
  1601. //FIXME? "unroll by 2" and mix
  1602. #ifdef HAVE_MMX2
  1603. #define HDF(src, dst) \
  1604. "movq " #src "(%%eax), %%mm0 \n\t"\
  1605. "movq " #src "(%%eax), %%mm1 \n\t"\
  1606. "movq " #src "(%%eax), %%mm2 \n\t"\
  1607. "psrlq $8, %%mm1 \n\t"\
  1608. "psubusb %%mm1, %%mm2 \n\t"\
  1609. "psubusb %%mm0, %%mm1 \n\t"\
  1610. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1611. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1612. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  1613. "pminub %%mm1, %%mm3 \n\t" /* p´5 = min(|p2-p1|, |p6-p5|)*/\
  1614. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1615. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5-p6|) */\
  1616. "paddb %%mm5, %%mm1 \n\t"\
  1617. "psubusb %%mm5, %%mm1 \n\t"\
  1618. "psrlw $2, %%mm1 \n\t"\
  1619. "pxor %%mm2, %%mm1 \n\t"\
  1620. "psubb %%mm2, %%mm1 \n\t"\
  1621. "pand %%mm6, %%mm1 \n\t"\
  1622. "psubb %%mm1, %%mm0 \n\t"\
  1623. "psllq $8, %%mm1 \n\t"\
  1624. "paddb %%mm1, %%mm0 \n\t"\
  1625. "movd %%mm0, " #dst" \n\t"\
  1626. "psrlq $32, %%mm0 \n\t"\
  1627. "movd %%mm0, 4" #dst" \n\t"
  1628. #else
  1629. #define HDF(src, dst)\
  1630. "movq " #src "(%%eax), %%mm0 \n\t"\
  1631. "movq %%mm0, %%mm1 \n\t"\
  1632. "movq %%mm0, %%mm2 \n\t"\
  1633. "psrlq $8, %%mm1 \n\t"\
  1634. "psubusb %%mm1, %%mm2 \n\t"\
  1635. "psubusb %%mm0, %%mm1 \n\t"\
  1636. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1637. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1638. "movq %%mm1, %%mm3 \n\t"\
  1639. "psllq $32, %%mm3 \n\t"\
  1640. "movq %%mm3, %%mm4 \n\t"\
  1641. "psubusb %%mm1, %%mm4 \n\t"\
  1642. "psubb %%mm4, %%mm3 \n\t"\
  1643. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1644. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5,ü6|) */\
  1645. "paddb %%mm5, %%mm1 \n\t"\
  1646. "psubusb %%mm5, %%mm1 \n\t"\
  1647. "psrlw $2, %%mm1 \n\t"\
  1648. "pxor %%mm2, %%mm1 \n\t"\
  1649. "psubb %%mm2, %%mm1 \n\t"\
  1650. "pand %%mm6, %%mm1 \n\t"\
  1651. "psubb %%mm1, %%mm0 \n\t"\
  1652. "psllq $8, %%mm1 \n\t"\
  1653. "paddb %%mm1, %%mm0 \n\t"\
  1654. "movd %%mm0, " #dst " \n\t"\
  1655. "psrlq $32, %%mm0 \n\t"\
  1656. "movd %%mm0, 4" #dst " \n\t"
  1657. #endif
  1658. HDF(0,(%0))
  1659. HDF(8,(%%ecx))
  1660. HDF(16,(%%ecx, %1))
  1661. HDF(24,(%%ecx, %1, 2))
  1662. HDF(32,(%0, %1, 4))
  1663. HDF(40,(%%ebx))
  1664. HDF(48,(%%ebx, %1))
  1665. HDF(56,(%%ebx, %1, 2))
  1666. :
  1667. : "r" (dst), "r" (stride), "r" (QP)
  1668. : "%eax", "%ebx", "%ecx"
  1669. );
  1670. #else
  1671. int y;
  1672. for(y=0; y<BLOCK_SIZE; y++)
  1673. {
  1674. const int middleEnergy= 5*(dst[4] - dst[5]) + 2*(dst[2] - dst[5]);
  1675. if(ABS(middleEnergy) < 8*QP)
  1676. {
  1677. const int q=(dst[3] - dst[4])/2;
  1678. const int leftEnergy= 5*(dst[2] - dst[1]) + 2*(dst[0] - dst[3]);
  1679. const int rightEnergy= 5*(dst[6] - dst[5]) + 2*(dst[4] - dst[7]);
  1680. int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
  1681. d= MAX(d, 0);
  1682. d= (5*d + 32) >> 6;
  1683. d*= SIGN(-middleEnergy);
  1684. if(q>0)
  1685. {
  1686. d= d<0 ? 0 : d;
  1687. d= d>q ? q : d;
  1688. }
  1689. else
  1690. {
  1691. d= d>0 ? 0 : d;
  1692. d= d<q ? q : d;
  1693. }
  1694. dst[3]-= d;
  1695. dst[4]+= d;
  1696. }
  1697. dst+= stride;
  1698. }
  1699. #endif
  1700. }
  1701. /**
  1702. * Do a horizontal low pass filter on the 10x8 block (dst points to middle 8x8 Block)
  1703. * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
  1704. * using the 7-Tap Filter (2,2,2,4,2,2,2)/16 (MMX2/3DNOW version)
  1705. */
  1706. static inline void doHorizLowPass(uint8_t dst[], int stride, int QP)
  1707. {
  1708. #if 0
  1709. asm volatile(
  1710. "leal (%0, %1), %%ecx \n\t"
  1711. "leal (%%ecx, %1, 4), %%ebx \n\t"
  1712. // 0 1 2 3 4 5 6 7 8 9
  1713. // %0 ecx ecx+%1 ecx+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1714. "pxor %%mm7, %%mm7 \n\t"
  1715. "leal tempBlock, %%eax \n\t"
  1716. /*
  1717. #define HLP1 "movq (%0), %%mm0 \n\t"\
  1718. "movq %%mm0, %%mm1 \n\t"\
  1719. "psllq $8, %%mm0 \n\t"\
  1720. PAVGB(%%mm1, %%mm0)\
  1721. "psrlw $8, %%mm0 \n\t"\
  1722. "pxor %%mm1, %%mm1 \n\t"\
  1723. "packuswb %%mm1, %%mm0 \n\t"\
  1724. "movq %%mm0, %%mm1 \n\t"\
  1725. "movq %%mm0, %%mm2 \n\t"\
  1726. "psllq $32, %%mm0 \n\t"\
  1727. "paddb %%mm0, %%mm1 \n\t"\
  1728. "psllq $16, %%mm2 \n\t"\
  1729. PAVGB(%%mm2, %%mm0)\
  1730. "movq %%mm0, %%mm3 \n\t"\
  1731. "pand bm11001100, %%mm0 \n\t"\
  1732. "paddusb %%mm0, %%mm3 \n\t"\
  1733. "psrlq $8, %%mm3 \n\t"\
  1734. PAVGB(%%mm1, %%mm4)\
  1735. PAVGB(%%mm3, %%mm2)\
  1736. "psrlq $16, %%mm2 \n\t"\
  1737. "punpcklbw %%mm2, %%mm2 \n\t"\
  1738. "movq %%mm2, (%0) \n\t"\
  1739. #define HLP2 "movq (%0), %%mm0 \n\t"\
  1740. "movq %%mm0, %%mm1 \n\t"\
  1741. "psllq $8, %%mm0 \n\t"\
  1742. PAVGB(%%mm1, %%mm0)\
  1743. "psrlw $8, %%mm0 \n\t"\
  1744. "pxor %%mm1, %%mm1 \n\t"\
  1745. "packuswb %%mm1, %%mm0 \n\t"\
  1746. "movq %%mm0, %%mm2 \n\t"\
  1747. "psllq $32, %%mm0 \n\t"\
  1748. "psllq $16, %%mm2 \n\t"\
  1749. PAVGB(%%mm2, %%mm0)\
  1750. "movq %%mm0, %%mm3 \n\t"\
  1751. "pand bm11001100, %%mm0 \n\t"\
  1752. "paddusb %%mm0, %%mm3 \n\t"\
  1753. "psrlq $8, %%mm3 \n\t"\
  1754. PAVGB(%%mm3, %%mm2)\
  1755. "psrlq $16, %%mm2 \n\t"\
  1756. "punpcklbw %%mm2, %%mm2 \n\t"\
  1757. "movq %%mm2, (%0) \n\t"\
  1758. */
  1759. // approximately a 7-Tap Filter with Vector (1,2,3,4,3,2,1)/16
  1760. /*
  1761. Implemented Exact 7-Tap
  1762. 9421 A321
  1763. 36421 64321
  1764. 334321 =
  1765. 1234321 =
  1766. 1234321 =
  1767. 123433 =
  1768. 12463 12346
  1769. 1249 123A
  1770. */
  1771. #ifdef HAVE_MMX2
  1772. #define HLP3(i) "movq " #i "(%%eax), %%mm0 \n\t"\
  1773. "movq %%mm0, %%mm1 \n\t"\
  1774. "movq %%mm0, %%mm2 \n\t"\
  1775. "movq %%mm0, %%mm3 \n\t"\
  1776. "movq %%mm0, %%mm4 \n\t"\
  1777. "psllq $8, %%mm1 \n\t"\
  1778. "psrlq $8, %%mm2 \n\t"\
  1779. "pand bm00000001, %%mm3 \n\t"\
  1780. "pand bm10000000, %%mm4 \n\t"\
  1781. "por %%mm3, %%mm1 \n\t"\
  1782. "por %%mm4, %%mm2 \n\t"\
  1783. PAVGB(%%mm2, %%mm1)\
  1784. PAVGB(%%mm1, %%mm0)\
  1785. \
  1786. "pshufw $0xF9, %%mm0, %%mm3 \n\t"\
  1787. "pshufw $0x90, %%mm0, %%mm4 \n\t"\
  1788. PAVGB(%%mm3, %%mm4)\
  1789. PAVGB(%%mm4, %%mm0)\
  1790. "movd %%mm0, (%0) \n\t"\
  1791. "psrlq $32, %%mm0 \n\t"\
  1792. "movd %%mm0, 4(%0) \n\t"
  1793. #else
  1794. #define HLP3(i) "movq " #i "(%%eax), %%mm0 \n\t"\
  1795. "movq %%mm0, %%mm1 \n\t"\
  1796. "movq %%mm0, %%mm2 \n\t"\
  1797. "movd -4(%0), %%mm3 \n\t" /*0001000*/\
  1798. "movd 8(%0), %%mm4 \n\t" /*0001000*/\
  1799. "psllq $8, %%mm1 \n\t"\
  1800. "psrlq $8, %%mm2 \n\t"\
  1801. "psrlq $24, %%mm3 \n\t"\
  1802. "psllq $56, %%mm4 \n\t"\
  1803. "por %%mm3, %%mm1 \n\t"\
  1804. "por %%mm4, %%mm2 \n\t"\
  1805. PAVGB(%%mm2, %%mm1)\
  1806. PAVGB(%%mm1, %%mm0)\
  1807. \
  1808. "movq %%mm0, %%mm3 \n\t"\
  1809. "movq %%mm0, %%mm4 \n\t"\
  1810. "movq %%mm0, %%mm5 \n\t"\
  1811. "psrlq $16, %%mm3 \n\t"\
  1812. "psllq $16, %%mm4 \n\t"\
  1813. "pand bm11000000, %%mm5 \n\t"\
  1814. "por %%mm5, %%mm3 \n\t"\
  1815. "movq %%mm0, %%mm5 \n\t"\
  1816. "pand bm00000011, %%mm5 \n\t"\
  1817. "por %%mm5, %%mm4 \n\t"\
  1818. PAVGB(%%mm3, %%mm4)\
  1819. PAVGB(%%mm4, %%mm0)\
  1820. "movd %%mm0, (%0) \n\t"\
  1821. "psrlq $32, %%mm0 \n\t"\
  1822. "movd %%mm0, 4(%0) \n\t"
  1823. #endif
  1824. /* uses the 7-Tap Filter: 1112111 */
  1825. #define NEW_HLP(src, dst)\
  1826. "movq " #src "(%%eax), %%mm1 \n\t"\
  1827. "movq " #src "(%%eax), %%mm2 \n\t"\
  1828. "psllq $8, %%mm1 \n\t"\
  1829. "psrlq $8, %%mm2 \n\t"\
  1830. "movd -4" #dst ", %%mm3 \n\t" /*0001000*/\
  1831. "movd 8" #dst ", %%mm4 \n\t" /*0001000*/\
  1832. "psrlq $24, %%mm3 \n\t"\
  1833. "psllq $56, %%mm4 \n\t"\
  1834. "por %%mm3, %%mm1 \n\t"\
  1835. "por %%mm4, %%mm2 \n\t"\
  1836. "movq %%mm1, %%mm5 \n\t"\
  1837. PAVGB(%%mm2, %%mm1)\
  1838. "movq " #src "(%%eax), %%mm0 \n\t"\
  1839. PAVGB(%%mm1, %%mm0)\
  1840. "psllq $8, %%mm5 \n\t"\
  1841. "psrlq $8, %%mm2 \n\t"\
  1842. "por %%mm3, %%mm5 \n\t"\
  1843. "por %%mm4, %%mm2 \n\t"\
  1844. "movq %%mm5, %%mm1 \n\t"\
  1845. PAVGB(%%mm2, %%mm5)\
  1846. "psllq $8, %%mm1 \n\t"\
  1847. "psrlq $8, %%mm2 \n\t"\
  1848. "por %%mm3, %%mm1 \n\t"\
  1849. "por %%mm4, %%mm2 \n\t"\
  1850. PAVGB(%%mm2, %%mm1)\
  1851. PAVGB(%%mm1, %%mm5)\
  1852. PAVGB(%%mm5, %%mm0)\
  1853. "movd %%mm0, " #dst " \n\t"\
  1854. "psrlq $32, %%mm0 \n\t"\
  1855. "movd %%mm0, 4" #dst " \n\t"
  1856. /* uses the 9-Tap Filter: 112242211 */
  1857. #define NEW_HLP2(i)\
  1858. "movq " #i "(%%eax), %%mm0 \n\t" /*0001000*/\
  1859. "movq %%mm0, %%mm1 \n\t" /*0001000*/\
  1860. "movq %%mm0, %%mm2 \n\t" /*0001000*/\
  1861. "movd -4(%0), %%mm3 \n\t" /*0001000*/\
  1862. "movd 8(%0), %%mm4 \n\t" /*0001000*/\
  1863. "psllq $8, %%mm1 \n\t"\
  1864. "psrlq $8, %%mm2 \n\t"\
  1865. "psrlq $24, %%mm3 \n\t"\
  1866. "psllq $56, %%mm4 \n\t"\
  1867. "por %%mm3, %%mm1 \n\t" /*0010000*/\
  1868. "por %%mm4, %%mm2 \n\t" /*0000100*/\
  1869. "movq %%mm1, %%mm5 \n\t" /*0010000*/\
  1870. PAVGB(%%mm2, %%mm1) /*0010100*/\
  1871. PAVGB(%%mm1, %%mm0) /*0012100*/\
  1872. "psllq $8, %%mm5 \n\t"\
  1873. "psrlq $8, %%mm2 \n\t"\
  1874. "por %%mm3, %%mm5 \n\t" /*0100000*/\
  1875. "por %%mm4, %%mm2 \n\t" /*0000010*/\
  1876. "movq %%mm5, %%mm1 \n\t" /*0100000*/\
  1877. PAVGB(%%mm2, %%mm5) /*0100010*/\
  1878. "psllq $8, %%mm1 \n\t"\
  1879. "psrlq $8, %%mm2 \n\t"\
  1880. "por %%mm3, %%mm1 \n\t" /*1000000*/\
  1881. "por %%mm4, %%mm2 \n\t" /*0000001*/\
  1882. "movq %%mm1, %%mm6 \n\t" /*1000000*/\
  1883. PAVGB(%%mm2, %%mm1) /*1000001*/\
  1884. "psllq $8, %%mm6 \n\t"\
  1885. "psrlq $8, %%mm2 \n\t"\
  1886. "por %%mm3, %%mm6 \n\t"/*100000000*/\
  1887. "por %%mm4, %%mm2 \n\t"/*000000001*/\
  1888. PAVGB(%%mm2, %%mm6) /*100000001*/\
  1889. PAVGB(%%mm6, %%mm1) /*110000011*/\
  1890. PAVGB(%%mm1, %%mm5) /*112000211*/\
  1891. PAVGB(%%mm5, %%mm0) /*112242211*/\
  1892. "movd %%mm0, (%0) \n\t"\
  1893. "psrlq $32, %%mm0 \n\t"\
  1894. "movd %%mm0, 4(%0) \n\t"
  1895. #define HLP(src, dst) NEW_HLP(src, dst)
  1896. HLP(0, (%0))
  1897. HLP(8, (%%ecx))
  1898. HLP(16, (%%ecx, %1))
  1899. HLP(24, (%%ecx, %1, 2))
  1900. HLP(32, (%0, %1, 4))
  1901. HLP(40, (%%ebx))
  1902. HLP(48, (%%ebx, %1))
  1903. HLP(56, (%%ebx, %1, 2))
  1904. :
  1905. : "r" (dst), "r" (stride)
  1906. : "%eax", "%ebx", "%ecx"
  1907. );
  1908. #else
  1909. int y;
  1910. for(y=0; y<BLOCK_SIZE; y++)
  1911. {
  1912. const int first= ABS(dst[-1] - dst[0]) < QP ? dst[-1] : dst[0];
  1913. const int last= ABS(dst[8] - dst[7]) < QP ? dst[8] : dst[7];
  1914. int sums[9];
  1915. sums[0] = first + dst[0];
  1916. sums[1] = dst[0] + dst[1];
  1917. sums[2] = dst[1] + dst[2];
  1918. sums[3] = dst[2] + dst[3];
  1919. sums[4] = dst[3] + dst[4];
  1920. sums[5] = dst[4] + dst[5];
  1921. sums[6] = dst[5] + dst[6];
  1922. sums[7] = dst[6] + dst[7];
  1923. sums[8] = dst[7] + last;
  1924. dst[0]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
  1925. dst[1]= ((dst[1]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
  1926. dst[2]= ((dst[2]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
  1927. dst[3]= ((dst[3]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
  1928. dst[4]= ((dst[4]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
  1929. dst[5]= ((dst[5]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
  1930. dst[6]= (((last + dst[6])<<2) + ((dst[7] + sums[5])<<1) + sums[3] + 8)>>4;
  1931. dst[7]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
  1932. dst+= stride;
  1933. }
  1934. #endif
  1935. }
/**
 * De-rings the 8x8 block at src (the surrounding 10x10 area is read).
 * A pixel is smoothed with a 3x3 (1,2,1 / 2,4,2 / 1,2,1)/16 kernel when its
 * whole 3x3 neighbourhood lies on the same side of the block's
 * (min+max+1)/2 threshold; the result is clamped to the original value
 * +/- 2*QP (see the C implementation in the #else branch).
 *
 * src    top-left of the 10x10 area (the filtered 8x8 block starts one line
 *        and one column further in)
 * stride distance in bytes between two lines
 * QP     quantization parameter, limits the maximum change per pixel
 */
static inline void dering(uint8_t src[], int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
/* pQPb2 = 2*QP in every byte, used later as the clamping range */
"movq pQPb, %%mm0 \n\t"
"paddusb %%mm0, %%mm0 \n\t"
"movq %%mm0, pQPb2 \n\t"
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
/* mm6 = running byte-wise min (starts at 0xFF..), mm7 = running max (starts at 0) */
"pcmpeqb %%mm6, %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
#ifdef HAVE_MMX2
#define FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"pminub %%mm0, %%mm6 \n\t"\
"pmaxub %%mm0, %%mm7 \n\t"
#else
#define FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"movq %%mm6, %%mm1 \n\t"\
"psubusb %%mm0, %%mm7 \n\t"\
"paddb %%mm0, %%mm7 \n\t"\
"psubusb %%mm0, %%mm1 \n\t"\
"psubb %%mm1, %%mm6 \n\t"
#endif
FIND_MIN_MAX((%%eax))
FIND_MIN_MAX((%%eax, %1))
FIND_MIN_MAX((%%eax, %1, 2))
FIND_MIN_MAX((%0, %1, 4))
FIND_MIN_MAX((%%ebx))
FIND_MIN_MAX((%%ebx, %1))
FIND_MIN_MAX((%%ebx, %1, 2))
FIND_MIN_MAX((%0, %1, 8))
/* horizontal reduction of mm6 to the overall minimum byte */
"movq %%mm6, %%mm4 \n\t"
"psrlq $8, %%mm6 \n\t"
#ifdef HAVE_MMX2
"pminub %%mm4, %%mm6 \n\t" // min of pixels
"pshufw $0xF9, %%mm6, %%mm4 \n\t"
"pminub %%mm4, %%mm6 \n\t" // min of pixels
"pshufw $0xFE, %%mm6, %%mm4 \n\t"
"pminub %%mm4, %%mm6 \n\t"
#else
"movq %%mm6, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm6 \n\t"
"movq %%mm6, %%mm4 \n\t"
"psrlq $16, %%mm6 \n\t"
"movq %%mm6, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm6 \n\t"
"movq %%mm6, %%mm4 \n\t"
"psrlq $32, %%mm6 \n\t"
"movq %%mm6, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm6 \n\t"
#endif
/* horizontal reduction of mm7 to the overall maximum byte */
"movq %%mm7, %%mm4 \n\t"
"psrlq $8, %%mm7 \n\t"
#ifdef HAVE_MMX2
"pmaxub %%mm4, %%mm7 \n\t" // max of pixels
"pshufw $0xF9, %%mm7, %%mm4 \n\t"
"pmaxub %%mm4, %%mm7 \n\t"
"pshufw $0xFE, %%mm7, %%mm4 \n\t"
"pmaxub %%mm4, %%mm7 \n\t"
#else
"psubusb %%mm4, %%mm7 \n\t"
"paddb %%mm4, %%mm7 \n\t"
"movq %%mm7, %%mm4 \n\t"
"psrlq $16, %%mm7 \n\t"
"psubusb %%mm4, %%mm7 \n\t"
"paddb %%mm4, %%mm7 \n\t"
"movq %%mm7, %%mm4 \n\t"
"psrlq $32, %%mm7 \n\t"
"psubusb %%mm4, %%mm7 \n\t"
"paddb %%mm4, %%mm7 \n\t"
#endif
PAVGB(%%mm6, %%mm7) // a=(max + min)/2
/* broadcast the threshold "a" to all 8 bytes and park it in temp0 */
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"movq %%mm7, temp0 \n\t"
/* prime the pipeline with line 0: filtered value and sign-count for L00/L10/L20 */
"movq (%0), %%mm0 \n\t" // L10
"movq %%mm0, %%mm1 \n\t" // L10
"movq %%mm0, %%mm2 \n\t" // L10
"psllq $8, %%mm1 \n\t"
"psrlq $8, %%mm2 \n\t"
"movd -4(%0), %%mm3 \n\t"
"movd 8(%0), %%mm4 \n\t"
"psrlq $24, %%mm3 \n\t"
"psllq $56, %%mm4 \n\t"
"por %%mm3, %%mm1 \n\t" // L00
"por %%mm4, %%mm2 \n\t" // L20
"movq %%mm1, %%mm3 \n\t" // L00
PAVGB(%%mm2, %%mm1) // (L20 + L00)/2
PAVGB(%%mm0, %%mm1) // (L20 + L00 + 2L10)/4
"psubusb %%mm7, %%mm0 \n\t"
"psubusb %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm3 \n\t"
"pcmpeqb b00, %%mm0 \n\t" // L10 > a ? 0 : -1
"pcmpeqb b00, %%mm2 \n\t" // L20 > a ? 0 : -1
"pcmpeqb b00, %%mm3 \n\t" // L00 > a ? 0 : -1
"paddb %%mm2, %%mm0 \n\t"
"paddb %%mm3, %%mm0 \n\t"
/* same for line 1 (L01/L11/L21) */
"movq (%%eax), %%mm2 \n\t" // L11
"movq %%mm2, %%mm3 \n\t" // L11
"movq %%mm2, %%mm4 \n\t" // L11
"psllq $8, %%mm3 \n\t"
"psrlq $8, %%mm4 \n\t"
"movd -4(%%eax), %%mm5 \n\t"
"movd 8(%%eax), %%mm6 \n\t"
"psrlq $24, %%mm5 \n\t"
"psllq $56, %%mm6 \n\t"
"por %%mm5, %%mm3 \n\t" // L01
"por %%mm6, %%mm4 \n\t" // L21
"movq %%mm3, %%mm5 \n\t" // L01
PAVGB(%%mm4, %%mm3) // (L21 + L01)/2
PAVGB(%%mm2, %%mm3) // (L21 + L01 + 2L11)/4
"psubusb %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm4 \n\t"
"psubusb %%mm7, %%mm5 \n\t"
"pcmpeqb b00, %%mm2 \n\t" // L11 > a ? 0 : -1
"pcmpeqb b00, %%mm4 \n\t" // L21 > a ? 0 : -1
"pcmpeqb b00, %%mm5 \n\t" // L01 > a ? 0 : -1
"paddb %%mm4, %%mm2 \n\t"
"paddb %%mm5, %%mm2 \n\t"
// 0, 2, 3, 1
#define DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
"movq " #src ", " #sx " \n\t" /* src[0] */\
"movq " #sx ", " #lx " \n\t" /* src[0] */\
"movq " #sx ", " #t0 " \n\t" /* src[0] */\
"psllq $8, " #lx " \n\t"\
"psrlq $8, " #t0 " \n\t"\
"movd -4" #src ", " #t1 " \n\t"\
"psrlq $24, " #t1 " \n\t"\
"por " #t1 ", " #lx " \n\t" /* src[-1] */\
"movd 8" #src ", " #t1 " \n\t"\
"psllq $56, " #t1 " \n\t"\
"por " #t1 ", " #t0 " \n\t" /* src[+1] */\
"movq " #lx ", " #t1 " \n\t" /* src[-1] */\
PAVGB(t0, lx) /* (src[-1] + src[+1])/2 */\
PAVGB(sx, lx) /* (src[-1] + 2src[0] + src[+1])/4 */\
PAVGB(lx, pplx) \
"movq " #lx ", temp1 \n\t"\
"movq temp0, " #lx " \n\t"\
"psubusb " #lx ", " #t1 " \n\t"\
"psubusb " #lx ", " #t0 " \n\t"\
"psubusb " #lx ", " #sx " \n\t"\
"movq b00, " #lx " \n\t"\
"pcmpeqb " #lx ", " #t1 " \n\t" /* src[-1] > a ? 0 : -1*/\
"pcmpeqb " #lx ", " #t0 " \n\t" /* src[+1] > a ? 0 : -1*/\
"pcmpeqb " #lx ", " #sx " \n\t" /* src[0] > a ? 0 : -1*/\
"paddb " #t1 ", " #t0 " \n\t"\
"paddb " #t0 ", " #sx " \n\t"\
\
PAVGB(plx, pplx) /* filtered */\
"movq " #dst ", " #t0 " \n\t" /* dst */\
"movq " #t0 ", " #t1 " \n\t" /* dst */\
"psubusb pQPb2, " #t0 " \n\t"\
"paddusb pQPb2, " #t1 " \n\t"\
PMAXUB(t0, pplx)\
PMINUB(t1, pplx, t0)\
"paddb " #sx ", " #ppsx " \n\t"\
"paddb " #psx ", " #ppsx " \n\t"\
"#paddb b02, " #ppsx " \n\t"\
"pand b08, " #ppsx " \n\t"\
"pcmpeqb " #lx ", " #ppsx " \n\t"\
"pand " #ppsx ", " #pplx " \n\t"\
"pandn " #dst ", " #ppsx " \n\t"\
"por " #pplx ", " #ppsx " \n\t"\
"movq " #ppsx ", " #dst " \n\t"\
"movq temp1, " #lx " \n\t"
/*
0000000
1111111
1111110
1111101
1111100
1111011
1111010
1111001
1111000
1110111
*/
/* the register roles rotate through the 8 lines so each line's
   (src[-1],src[0],src[+1]) averages and sign counts are reused twice */
//DERING_CORE(dst,src ,ppsx ,psx ,sx ,pplx ,plx ,lx ,t0 ,t1)
DERING_CORE((%%eax),(%%eax, %1) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%eax, %1),(%%eax, %1, 2) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%eax, %1, 2),(%0, %1, 4) ,%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%0, %1, 4),(%%ebx) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%ebx),(%%ebx, %1) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%ebx, %1), (%%ebx, %1, 2),%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%%ebx, %1, 2),(%0, %1, 8) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%0, %1, 8),(%%ebx, %1, 4) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
: : "r" (src), "r" (stride), "r" (QP)
: "%eax", "%ebx"
);
#else
int y;
int min=255;
int max=0;
int avg;
uint8_t *p;
int s[10];
/* pass 1: min/max over the inner 8x8 pixels */
for(y=1; y<9; y++)
{
int x;
p= src + stride*y;
for(x=1; x<9; x++)
{
p++;
if(*p > max) max= *p;
if(*p < min) min= *p;
}
}
avg= (min + max + 1)/2;
/* pass 2: per line, build a bitmask of horizontal runs of 3 pixels that are
   all above avg (low 16 bits) or all not-above avg (high 16 bits) */
for(y=0; y<10; y++)
{
int x;
int t = 0;
p= src + stride*y;
for(x=0; x<10; x++)
{
if(*p > avg) t |= (1<<x);
p++;
}
t |= (~t)<<16;
t &= (t<<1) & (t>>1);
s[y] = t;
}
/* pass 3: where three consecutive lines agree (flat 3x3 neighbourhood),
   smooth with the 1-2-1 / 2-4-2 / 1-2-1 kernel, clamped to +/- 2*QP */
for(y=1; y<9; y++)
{
int x;
int t = s[y-1] & s[y] & s[y+1];
t|= t>>16;
p= src + stride*y;
for(x=1; x<9; x++)
{
p++;
if(t & (1<<x))
{
int f= (*(p-stride-1)) + 2*(*(p-stride)) + (*(p-stride+1))
+2*(*(p -1)) + 4*(*p ) + 2*(*(p +1))
+(*(p+stride-1)) + 2*(*(p+stride)) + (*(p+stride+1));
f= (f + 8)>>4;
if (*p + 2*QP < f) *p= *p + 2*QP;
else if(*p - 2*QP > f) *p= *p - 2*QP;
else *p=f;
}
}
}
#endif
}
  2189. /**
  2190. * Deinterlaces the given block
  2191. * will be called for every 8x8 block and can read & write from line 4-15
  2192. * lines 0-3 have been passed through the deblock / dering filters allready, but can be read too
  2193. * lines 4-12 will be read into the deblocking filter and should be deinterlaced
  2194. */
  2195. static inline void deInterlaceInterpolateLinear(uint8_t src[], int stride)
  2196. {
  2197. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  2198. src+= 4*stride;
  2199. asm volatile(
  2200. "leal (%0, %1), %%eax \n\t"
  2201. "leal (%%eax, %1, 4), %%ebx \n\t"
  2202. // 0 1 2 3 4 5 6 7 8 9
  2203. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  2204. "movq (%0), %%mm0 \n\t"
  2205. "movq (%%eax, %1), %%mm1 \n\t"
  2206. PAVGB(%%mm1, %%mm0)
  2207. "movq %%mm0, (%%eax) \n\t"
  2208. "movq (%0, %1, 4), %%mm0 \n\t"
  2209. PAVGB(%%mm0, %%mm1)
  2210. "movq %%mm1, (%%eax, %1, 2) \n\t"
  2211. "movq (%%ebx, %1), %%mm1 \n\t"
  2212. PAVGB(%%mm1, %%mm0)
  2213. "movq %%mm0, (%%ebx) \n\t"
  2214. "movq (%0, %1, 8), %%mm0 \n\t"
  2215. PAVGB(%%mm0, %%mm1)
  2216. "movq %%mm1, (%%ebx, %1, 2) \n\t"
  2217. : : "r" (src), "r" (stride)
  2218. : "%eax", "%ebx"
  2219. );
  2220. #else
  2221. int x;
  2222. src+= 4*stride;
  2223. for(x=0; x<8; x++)
  2224. {
  2225. src[stride] = (src[0] + src[stride*2])>>1;
  2226. src[stride*3] = (src[stride*2] + src[stride*4])>>1;
  2227. src[stride*5] = (src[stride*4] + src[stride*6])>>1;
  2228. src[stride*7] = (src[stride*6] + src[stride*8])>>1;
  2229. src++;
  2230. }
  2231. #endif
  2232. }
  2233. /**
  2234. * Deinterlaces the given block
  2235. * will be called for every 8x8 block and can read & write from line 4-15
  2236. * lines 0-3 have been passed through the deblock / dering filters allready, but can be read too
  2237. * lines 4-12 will be read into the deblocking filter and should be deinterlaced
  2238. * this filter will read lines 3-15 and write 7-13
  2239. * no cliping in C version
  2240. */
  2241. static inline void deInterlaceInterpolateCubic(uint8_t src[], int stride)
  2242. {
  2243. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  2244. src+= stride*3;
  2245. asm volatile(
  2246. "leal (%0, %1), %%eax \n\t"
  2247. "leal (%%eax, %1, 4), %%ebx \n\t"
  2248. "leal (%%ebx, %1, 4), %%ecx \n\t"
  2249. "addl %1, %%ecx \n\t"
  2250. "pxor %%mm7, %%mm7 \n\t"
  2251. // 0 1 2 3 4 5 6 7 8 9 10
  2252. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1 ecx
  2253. #define DEINT_CUBIC(a,b,c,d,e)\
  2254. "movq " #a ", %%mm0 \n\t"\
  2255. "movq " #b ", %%mm1 \n\t"\
  2256. "movq " #d ", %%mm2 \n\t"\
  2257. "movq " #e ", %%mm3 \n\t"\
  2258. PAVGB(%%mm2, %%mm1) /* (b+d) /2 */\
  2259. PAVGB(%%mm3, %%mm0) /* a(a+e) /2 */\
  2260. "movq %%mm0, %%mm2 \n\t"\
  2261. "punpcklbw %%mm7, %%mm0 \n\t"\
  2262. "punpckhbw %%mm7, %%mm2 \n\t"\
  2263. "movq %%mm1, %%mm3 \n\t"\
  2264. "punpcklbw %%mm7, %%mm1 \n\t"\
  2265. "punpckhbw %%mm7, %%mm3 \n\t"\
  2266. "psubw %%mm1, %%mm0 \n\t" /* L(a+e - (b+d))/2 */\
  2267. "psubw %%mm3, %%mm2 \n\t" /* H(a+e - (b+d))/2 */\
  2268. "psraw $3, %%mm0 \n\t" /* L(a+e - (b+d))/16 */\
  2269. "psraw $3, %%mm2 \n\t" /* H(a+e - (b+d))/16 */\
  2270. "psubw %%mm0, %%mm1 \n\t" /* L(9b + 9d - a - e)/16 */\
  2271. "psubw %%mm2, %%mm3 \n\t" /* H(9b + 9d - a - e)/16 */\
  2272. "packuswb %%mm3, %%mm1 \n\t"\
  2273. "movq %%mm1, " #c " \n\t"
  2274. DEINT_CUBIC((%0), (%%eax, %1), (%%eax, %1, 2), (%0, %1, 4), (%%ebx, %1))
  2275. DEINT_CUBIC((%%eax, %1), (%0, %1, 4), (%%ebx), (%%ebx, %1), (%0, %1, 8))
  2276. DEINT_CUBIC((%0, %1, 4), (%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8), (%%ecx))
  2277. DEINT_CUBIC((%%ebx, %1), (%0, %1, 8), (%%ebx, %1, 4), (%%ecx), (%%ecx, %1, 2))
  2278. : : "r" (src), "r" (stride)
  2279. : "%eax", "%ebx", "ecx"
  2280. );
  2281. #else
  2282. int x;
  2283. src+= stride*3;
  2284. for(x=0; x<8; x++)
  2285. {
  2286. src[stride*3] = (-src[0] + 9*src[stride*2] + 9*src[stride*4] - src[stride*6])>>4;
  2287. src[stride*5] = (-src[stride*2] + 9*src[stride*4] + 9*src[stride*6] - src[stride*8])>>4;
  2288. src[stride*7] = (-src[stride*4] + 9*src[stride*6] + 9*src[stride*8] - src[stride*10])>>4;
  2289. src[stride*9] = (-src[stride*6] + 9*src[stride*8] + 9*src[stride*10] - src[stride*12])>>4;
  2290. src++;
  2291. }
  2292. #endif
  2293. }
  2294. /**
  2295. * Deinterlaces the given block
  2296. * will be called for every 8x8 block and can read & write from line 4-15
  2297. * lines 0-3 have been passed through the deblock / dering filters allready, but can be read too
  2298. * lines 4-12 will be read into the deblocking filter and should be deinterlaced
  2299. * will shift the image up by 1 line (FIXME if this is a problem)
  2300. * this filter will read lines 4-13 and write 4-11
  2301. */
  2302. static inline void deInterlaceBlendLinear(uint8_t src[], int stride)
  2303. {
  2304. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  2305. src+= 4*stride;
  2306. asm volatile(
  2307. "leal (%0, %1), %%eax \n\t"
  2308. "leal (%%eax, %1, 4), %%ebx \n\t"
  2309. // 0 1 2 3 4 5 6 7 8 9
  2310. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  2311. "movq (%0), %%mm0 \n\t" // L0
  2312. "movq (%%eax, %1), %%mm1 \n\t" // L2
  2313. PAVGB(%%mm1, %%mm0) // L0+L2
  2314. "movq (%%eax), %%mm2 \n\t" // L1
  2315. PAVGB(%%mm2, %%mm0)
  2316. "movq %%mm0, (%0) \n\t"
  2317. "movq (%%eax, %1, 2), %%mm0 \n\t" // L3
  2318. PAVGB(%%mm0, %%mm2) // L1+L3
  2319. PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
  2320. "movq %%mm2, (%%eax) \n\t"
  2321. "movq (%0, %1, 4), %%mm2 \n\t" // L4
  2322. PAVGB(%%mm2, %%mm1) // L2+L4
  2323. PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
  2324. "movq %%mm1, (%%eax, %1) \n\t"
  2325. "movq (%%ebx), %%mm1 \n\t" // L5
  2326. PAVGB(%%mm1, %%mm0) // L3+L5
  2327. PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
  2328. "movq %%mm0, (%%eax, %1, 2) \n\t"
  2329. "movq (%%ebx, %1), %%mm0 \n\t" // L6
  2330. PAVGB(%%mm0, %%mm2) // L4+L6
  2331. PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
  2332. "movq %%mm2, (%0, %1, 4) \n\t"
  2333. "movq (%%ebx, %1, 2), %%mm2 \n\t" // L7
  2334. PAVGB(%%mm2, %%mm1) // L5+L7
  2335. PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
  2336. "movq %%mm1, (%%ebx) \n\t"
  2337. "movq (%0, %1, 8), %%mm1 \n\t" // L8
  2338. PAVGB(%%mm1, %%mm0) // L6+L8
  2339. PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8
  2340. "movq %%mm0, (%%ebx, %1) \n\t"
  2341. "movq (%%ebx, %1, 4), %%mm0 \n\t" // L9
  2342. PAVGB(%%mm0, %%mm2) // L7+L9
  2343. PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9
  2344. "movq %%mm2, (%%ebx, %1, 2) \n\t"
  2345. : : "r" (src), "r" (stride)
  2346. : "%eax", "%ebx"
  2347. );
  2348. #else
  2349. int x;
  2350. src+= 4*stride;
  2351. for(x=0; x<8; x++)
  2352. {
  2353. src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
  2354. src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
  2355. src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
  2356. src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
  2357. src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
  2358. src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
  2359. src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
  2360. src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
  2361. src++;
  2362. }
  2363. #endif
  2364. }
  2365. /**
  2366. * Deinterlaces the given block
  2367. * will be called for every 8x8 block and can read & write from line 4-15,
  2368. * lines 0-3 have been passed through the deblock / dering filters allready, but can be read too
  2369. * lines 4-12 will be read into the deblocking filter and should be deinterlaced
  2370. */
  2371. static inline void deInterlaceMedian(uint8_t src[], int stride)
  2372. {
  2373. #ifdef HAVE_MMX
  2374. src+= 4*stride;
  2375. #ifdef HAVE_MMX2
  2376. asm volatile(
  2377. "leal (%0, %1), %%eax \n\t"
  2378. "leal (%%eax, %1, 4), %%ebx \n\t"
  2379. // 0 1 2 3 4 5 6 7 8 9
  2380. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  2381. "movq (%0), %%mm0 \n\t" //
  2382. "movq (%%eax, %1), %%mm2 \n\t" //
  2383. "movq (%%eax), %%mm1 \n\t" //
  2384. "movq %%mm0, %%mm3 \n\t"
  2385. "pmaxub %%mm1, %%mm0 \n\t" //
  2386. "pminub %%mm3, %%mm1 \n\t" //
  2387. "pmaxub %%mm2, %%mm1 \n\t" //
  2388. "pminub %%mm1, %%mm0 \n\t"
  2389. "movq %%mm0, (%%eax) \n\t"
  2390. "movq (%0, %1, 4), %%mm0 \n\t" //
  2391. "movq (%%eax, %1, 2), %%mm1 \n\t" //
  2392. "movq %%mm2, %%mm3 \n\t"
  2393. "pmaxub %%mm1, %%mm2 \n\t" //
  2394. "pminub %%mm3, %%mm1 \n\t" //
  2395. "pmaxub %%mm0, %%mm1 \n\t" //
  2396. "pminub %%mm1, %%mm2 \n\t"
  2397. "movq %%mm2, (%%eax, %1, 2) \n\t"
  2398. "movq (%%ebx), %%mm2 \n\t" //
  2399. "movq (%%ebx, %1), %%mm1 \n\t" //
  2400. "movq %%mm2, %%mm3 \n\t"
  2401. "pmaxub %%mm0, %%mm2 \n\t" //
  2402. "pminub %%mm3, %%mm0 \n\t" //
  2403. "pmaxub %%mm1, %%mm0 \n\t" //
  2404. "pminub %%mm0, %%mm2 \n\t"
  2405. "movq %%mm2, (%%ebx) \n\t"
  2406. "movq (%%ebx, %1, 2), %%mm2 \n\t" //
  2407. "movq (%0, %1, 8), %%mm0 \n\t" //
  2408. "movq %%mm2, %%mm3 \n\t"
  2409. "pmaxub %%mm0, %%mm2 \n\t" //
  2410. "pminub %%mm3, %%mm0 \n\t" //
  2411. "pmaxub %%mm1, %%mm0 \n\t" //
  2412. "pminub %%mm0, %%mm2 \n\t"
  2413. "movq %%mm2, (%%ebx, %1, 2) \n\t"
  2414. : : "r" (src), "r" (stride)
  2415. : "%eax", "%ebx"
  2416. );
  2417. #else // MMX without MMX2
  2418. asm volatile(
  2419. "leal (%0, %1), %%eax \n\t"
  2420. "leal (%%eax, %1, 4), %%ebx \n\t"
  2421. // 0 1 2 3 4 5 6 7 8 9
  2422. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  2423. "pxor %%mm7, %%mm7 \n\t"
  2424. #define MEDIAN(a,b,c)\
  2425. "movq " #a ", %%mm0 \n\t"\
  2426. "movq " #b ", %%mm2 \n\t"\
  2427. "movq " #c ", %%mm1 \n\t"\
  2428. "movq %%mm0, %%mm3 \n\t"\
  2429. "movq %%mm1, %%mm4 \n\t"\
  2430. "movq %%mm2, %%mm5 \n\t"\
  2431. "psubusb %%mm1, %%mm3 \n\t"\
  2432. "psubusb %%mm2, %%mm4 \n\t"\
  2433. "psubusb %%mm0, %%mm5 \n\t"\
  2434. "pcmpeqb %%mm7, %%mm3 \n\t"\
  2435. "pcmpeqb %%mm7, %%mm4 \n\t"\
  2436. "pcmpeqb %%mm7, %%mm5 \n\t"\
  2437. "movq %%mm3, %%mm6 \n\t"\
  2438. "pxor %%mm4, %%mm3 \n\t"\
  2439. "pxor %%mm5, %%mm4 \n\t"\
  2440. "pxor %%mm6, %%mm5 \n\t"\
  2441. "por %%mm3, %%mm1 \n\t"\
  2442. "por %%mm4, %%mm2 \n\t"\
  2443. "por %%mm5, %%mm0 \n\t"\
  2444. "pand %%mm2, %%mm0 \n\t"\
  2445. "pand %%mm1, %%mm0 \n\t"\
  2446. "movq %%mm0, " #b " \n\t"
  2447. MEDIAN((%0), (%%eax), (%%eax, %1))
  2448. MEDIAN((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4))
  2449. MEDIAN((%0, %1, 4), (%%ebx), (%%ebx, %1))
  2450. MEDIAN((%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8))
  2451. : : "r" (src), "r" (stride)
  2452. : "%eax", "%ebx"
  2453. );
  2454. #endif // MMX
  2455. #else
  2456. //FIXME
  2457. int x;
  2458. src+= 4*stride;
  2459. for(x=0; x<8; x++)
  2460. {
  2461. src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
  2462. src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
  2463. src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
  2464. src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
  2465. src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
  2466. src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
  2467. src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
  2468. src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
  2469. src++;
  2470. }
  2471. #endif
  2472. }
  2473. #ifdef HAVE_MMX
  2474. /**
  2475. * transposes and shift the given 8x8 Block into dst1 and dst2
  2476. */
  2477. static inline void transpose1(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride)
  2478. {
  2479. asm(
  2480. "leal (%0, %1), %%eax \n\t"
  2481. "leal (%%eax, %1, 4), %%ebx \n\t"
  2482. // 0 1 2 3 4 5 6 7 8 9
  2483. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  2484. "movq (%0), %%mm0 \n\t" // 12345678
  2485. "movq (%%eax), %%mm1 \n\t" // abcdefgh
  2486. "movq %%mm0, %%mm2 \n\t" // 12345678
  2487. "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
  2488. "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
  2489. "movq (%%eax, %1), %%mm1 \n\t"
  2490. "movq (%%eax, %1, 2), %%mm3 \n\t"
  2491. "movq %%mm1, %%mm4 \n\t"
  2492. "punpcklbw %%mm3, %%mm1 \n\t"
  2493. "punpckhbw %%mm3, %%mm4 \n\t"
  2494. "movq %%mm0, %%mm3 \n\t"
  2495. "punpcklwd %%mm1, %%mm0 \n\t"
  2496. "punpckhwd %%mm1, %%mm3 \n\t"
  2497. "movq %%mm2, %%mm1 \n\t"
  2498. "punpcklwd %%mm4, %%mm2 \n\t"
  2499. "punpckhwd %%mm4, %%mm1 \n\t"
  2500. "movd %%mm0, 128(%2) \n\t"
  2501. "psrlq $32, %%mm0 \n\t"
  2502. "movd %%mm0, 144(%2) \n\t"
  2503. "movd %%mm3, 160(%2) \n\t"
  2504. "psrlq $32, %%mm3 \n\t"
  2505. "movd %%mm3, 176(%2) \n\t"
  2506. "movd %%mm3, 48(%3) \n\t"
  2507. "movd %%mm2, 192(%2) \n\t"
  2508. "movd %%mm2, 64(%3) \n\t"
  2509. "psrlq $32, %%mm2 \n\t"
  2510. "movd %%mm2, 80(%3) \n\t"
  2511. "movd %%mm1, 96(%3) \n\t"
  2512. "psrlq $32, %%mm1 \n\t"
  2513. "movd %%mm1, 112(%3) \n\t"
  2514. "movq (%0, %1, 4), %%mm0 \n\t" // 12345678
  2515. "movq (%%ebx), %%mm1 \n\t" // abcdefgh
  2516. "movq %%mm0, %%mm2 \n\t" // 12345678
  2517. "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
  2518. "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
  2519. "movq (%%ebx, %1), %%mm1 \n\t"
  2520. "movq (%%ebx, %1, 2), %%mm3 \n\t"
  2521. "movq %%mm1, %%mm4 \n\t"
  2522. "punpcklbw %%mm3, %%mm1 \n\t"
  2523. "punpckhbw %%mm3, %%mm4 \n\t"
  2524. "movq %%mm0, %%mm3 \n\t"
  2525. "punpcklwd %%mm1, %%mm0 \n\t"
  2526. "punpckhwd %%mm1, %%mm3 \n\t"
  2527. "movq %%mm2, %%mm1 \n\t"
  2528. "punpcklwd %%mm4, %%mm2 \n\t"
  2529. "punpckhwd %%mm4, %%mm1 \n\t"
  2530. "movd %%mm0, 132(%2) \n\t"
  2531. "psrlq $32, %%mm0 \n\t"
  2532. "movd %%mm0, 148(%2) \n\t"
  2533. "movd %%mm3, 164(%2) \n\t"
  2534. "psrlq $32, %%mm3 \n\t"
  2535. "movd %%mm3, 180(%2) \n\t"
  2536. "movd %%mm3, 52(%3) \n\t"
  2537. "movd %%mm2, 196(%2) \n\t"
  2538. "movd %%mm2, 68(%3) \n\t"
  2539. "psrlq $32, %%mm2 \n\t"
  2540. "movd %%mm2, 84(%3) \n\t"
  2541. "movd %%mm1, 100(%3) \n\t"
  2542. "psrlq $32, %%mm1 \n\t"
  2543. "movd %%mm1, 116(%3) \n\t"
  2544. :: "r" (src), "r" (srcStride), "r" (dst1), "r" (dst2)
  2545. : "%eax", "%ebx"
  2546. );
  2547. }
  2548. /**
  2549. * transposes the given 8x8 block
  2550. */
  2551. static inline void transpose2(uint8_t *dst, int dstStride, uint8_t *src)
  2552. {
  2553. asm(
  2554. "leal (%0, %1), %%eax \n\t"
  2555. "leal (%%eax, %1, 4), %%ebx \n\t"
  2556. // 0 1 2 3 4 5 6 7 8 9
  2557. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  2558. "movq (%2), %%mm0 \n\t" // 12345678
  2559. "movq 16(%2), %%mm1 \n\t" // abcdefgh
  2560. "movq %%mm0, %%mm2 \n\t" // 12345678
  2561. "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
  2562. "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
  2563. "movq 32(%2), %%mm1 \n\t"
  2564. "movq 48(%2), %%mm3 \n\t"
  2565. "movq %%mm1, %%mm4 \n\t"
  2566. "punpcklbw %%mm3, %%mm1 \n\t"
  2567. "punpckhbw %%mm3, %%mm4 \n\t"
  2568. "movq %%mm0, %%mm3 \n\t"
  2569. "punpcklwd %%mm1, %%mm0 \n\t"
  2570. "punpckhwd %%mm1, %%mm3 \n\t"
  2571. "movq %%mm2, %%mm1 \n\t"
  2572. "punpcklwd %%mm4, %%mm2 \n\t"
  2573. "punpckhwd %%mm4, %%mm1 \n\t"
  2574. "movd %%mm0, (%0) \n\t"
  2575. "psrlq $32, %%mm0 \n\t"
  2576. "movd %%mm0, (%%eax) \n\t"
  2577. "movd %%mm3, (%%eax, %1) \n\t"
  2578. "psrlq $32, %%mm3 \n\t"
  2579. "movd %%mm3, (%%eax, %1, 2) \n\t"
  2580. "movd %%mm2, (%0, %1, 4) \n\t"
  2581. "psrlq $32, %%mm2 \n\t"
  2582. "movd %%mm2, (%%ebx) \n\t"
  2583. "movd %%mm1, (%%ebx, %1) \n\t"
  2584. "psrlq $32, %%mm1 \n\t"
  2585. "movd %%mm1, (%%ebx, %1, 2) \n\t"
  2586. "movq 64(%2), %%mm0 \n\t" // 12345678
  2587. "movq 80(%2), %%mm1 \n\t" // abcdefgh
  2588. "movq %%mm0, %%mm2 \n\t" // 12345678
  2589. "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
  2590. "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
  2591. "movq 96(%2), %%mm1 \n\t"
  2592. "movq 112(%2), %%mm3 \n\t"
  2593. "movq %%mm1, %%mm4 \n\t"
  2594. "punpcklbw %%mm3, %%mm1 \n\t"
  2595. "punpckhbw %%mm3, %%mm4 \n\t"
  2596. "movq %%mm0, %%mm3 \n\t"
  2597. "punpcklwd %%mm1, %%mm0 \n\t"
  2598. "punpckhwd %%mm1, %%mm3 \n\t"
  2599. "movq %%mm2, %%mm1 \n\t"
  2600. "punpcklwd %%mm4, %%mm2 \n\t"
  2601. "punpckhwd %%mm4, %%mm1 \n\t"
  2602. "movd %%mm0, 4(%0) \n\t"
  2603. "psrlq $32, %%mm0 \n\t"
  2604. "movd %%mm0, 4(%%eax) \n\t"
  2605. "movd %%mm3, 4(%%eax, %1) \n\t"
  2606. "psrlq $32, %%mm3 \n\t"
  2607. "movd %%mm3, 4(%%eax, %1, 2) \n\t"
  2608. "movd %%mm2, 4(%0, %1, 4) \n\t"
  2609. "psrlq $32, %%mm2 \n\t"
  2610. "movd %%mm2, 4(%%ebx) \n\t"
  2611. "movd %%mm1, 4(%%ebx, %1) \n\t"
  2612. "psrlq $32, %%mm1 \n\t"
  2613. "movd %%mm1, 4(%%ebx, %1, 2) \n\t"
  2614. :: "r" (dst), "r" (dstStride), "r" (src)
  2615. : "%eax", "%ebx"
  2616. );
  2617. }
  2618. #endif
  2619. //static int test=0;
/**
 * Temporal noise reducer.
 * Compares the current 8x8 block in src against the corresponding block of
 * the previous output frame (tempBlured) and, depending on the (filtered)
 * squared-difference measure, either copies the new block, or blends old and
 * new with one of several strengths (blend ratios 1:1, 3:1 or 7:1), writing
 * the result to both src and tempBlured.
 * tempBluredPast is a per-block history of the difference values; the offsets
 * +-1 and +-1024 bytes (+-256 uint32 entries) used below suggest neighbouring
 * blocks in a 256-entry-wide grid -- TODO confirm against the caller.
 * maxNoise holds the 3 blend-strength thresholds.
 * NOTE(review): the MMX path ignores the maxNoise parameter and compares
 * against the global maxTmpNoise array instead; also the MMX path stores the
 * *filtered* difference into tempBluredPast while the C path stores the raw
 * one -- the two implementations are not bit-identical. Verify which is
 * intended before relying on either.
 */
static void inline tempNoiseReducer(uint8_t *src, int stride,
	uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
{
#define FAST_L2_DIFF
//#define L1_DIFF //u should change the thresholds too if u try that one
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	asm volatile(
		"leal (%2, %2, 2), %%eax		\n\t" // 3*stride
		"leal (%2, %2, 4), %%ebx		\n\t" // 5*stride
		"leal (%%ebx, %2, 2), %%ecx		\n\t" // 7*stride
//	0	1	2	3	4	5	6	7	8	9
//	%x	%x+%2	%x+2%2	%x+eax	%x+4%2	%x+ebx	%x+2eax	%x+ecx	%x+8%2
//FIXME reorder?
#ifdef L1_DIFF //needs mmx2
		// sum of absolute differences over all 8 rows, accumulated in mm0
		"movq (%0), %%mm0			\n\t" // L0
		"psadbw (%1), %%mm0			\n\t" // |L0-R0|
		"movq (%0, %2), %%mm1			\n\t" // L1
		"psadbw (%1, %2), %%mm1			\n\t" // |L1-R1|
		"movq (%0, %2, 2), %%mm2		\n\t" // L2
		"psadbw (%1, %2, 2), %%mm2		\n\t" // |L2-R2|
		"movq (%0, %%eax), %%mm3		\n\t" // L3
		"psadbw (%1, %%eax), %%mm3		\n\t" // |L3-R3|

		"movq (%0, %2, 4), %%mm4		\n\t" // L4
		"paddw %%mm1, %%mm0			\n\t"
		"psadbw (%1, %2, 4), %%mm4		\n\t" // |L4-R4|
		"movq (%0, %%ebx), %%mm5		\n\t" // L5
		"paddw %%mm2, %%mm0			\n\t"
		"psadbw (%1, %%ebx), %%mm5		\n\t" // |L5-R5|
		"movq (%0, %%eax, 2), %%mm6		\n\t" // L6
		"paddw %%mm3, %%mm0			\n\t"
		"psadbw (%1, %%eax, 2), %%mm6		\n\t" // |L6-R6|
		"movq (%0, %%ecx), %%mm7		\n\t" // L7
		"paddw %%mm4, %%mm0			\n\t"
		"psadbw (%1, %%ecx), %%mm7		\n\t" // |L7-R7|
		"paddw %%mm5, %%mm6			\n\t"
		"paddw %%mm7, %%mm6			\n\t"
		"paddw %%mm6, %%mm0			\n\t"
#elif defined (FAST_L2_DIFF)
		// approximate sum of squared differences: avg(a, 255-b)-128
		// squared per byte, accumulated as dwords in mm0
		"pcmpeqb %%mm7, %%mm7			\n\t"
		"movq b80, %%mm6			\n\t"
		"pxor %%mm0, %%mm0			\n\t"
#define L2_DIFF_CORE(a, b)\
		"movq " #a ", %%mm5			\n\t"\
		"movq " #b ", %%mm2			\n\t"\
		"pxor %%mm7, %%mm2			\n\t"\
		PAVGB(%%mm2, %%mm5)\
		"paddb %%mm6, %%mm5			\n\t"\
		"movq %%mm5, %%mm2			\n\t"\
		"psllw $8, %%mm5			\n\t"\
		"pmaddwd %%mm5, %%mm5			\n\t"\
		"pmaddwd %%mm2, %%mm2			\n\t"\
		"paddd %%mm2, %%mm5			\n\t"\
		"psrld $14, %%mm5			\n\t"\
		"paddd %%mm5, %%mm0			\n\t"

L2_DIFF_CORE((%0), (%1))
L2_DIFF_CORE((%0, %2), (%1, %2))
L2_DIFF_CORE((%0, %2, 2), (%1, %2, 2))
L2_DIFF_CORE((%0, %%eax), (%1, %%eax))
L2_DIFF_CORE((%0, %2, 4), (%1, %2, 4))
L2_DIFF_CORE((%0, %%ebx), (%1, %%ebx))
L2_DIFF_CORE((%0, %%eax,2), (%1, %%eax,2))
L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))

#else
		// exact sum of squared differences (words -> pmaddwd -> dwords)
		"pxor %%mm7, %%mm7			\n\t"
		"pxor %%mm0, %%mm0			\n\t"
#define L2_DIFF_CORE(a, b)\
		"movq " #a ", %%mm5			\n\t"\
		"movq " #b ", %%mm2			\n\t"\
		"movq %%mm5, %%mm1			\n\t"\
		"movq %%mm2, %%mm3			\n\t"\
		"punpcklbw %%mm7, %%mm5			\n\t"\
		"punpckhbw %%mm7, %%mm1			\n\t"\
		"punpcklbw %%mm7, %%mm2			\n\t"\
		"punpckhbw %%mm7, %%mm3			\n\t"\
		"psubw %%mm2, %%mm5			\n\t"\
		"psubw %%mm3, %%mm1			\n\t"\
		"pmaddwd %%mm5, %%mm5			\n\t"\
		"pmaddwd %%mm1, %%mm1			\n\t"\
		"paddd %%mm1, %%mm5			\n\t"\
		"paddd %%mm5, %%mm0			\n\t"

L2_DIFF_CORE((%0), (%1))
L2_DIFF_CORE((%0, %2), (%1, %2))
L2_DIFF_CORE((%0, %2, 2), (%1, %2, 2))
L2_DIFF_CORE((%0, %%eax), (%1, %%eax))
L2_DIFF_CORE((%0, %2, 4), (%1, %2, 4))
L2_DIFF_CORE((%0, %%ebx), (%1, %%ebx))
L2_DIFF_CORE((%0, %%eax,2), (%1, %%eax,2))
L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))

#endif

		// horizontal add of the two dword partial sums -> ecx = diff
		"movq %%mm0, %%mm4			\n\t"
		"psrlq $32, %%mm0			\n\t"
		"paddd %%mm0, %%mm4			\n\t"
		"movd %%mm4, %%ecx			\n\t"
		// temporal/spatial low-pass of the difference:
		// ecx = (4*diff + past[-1] + past[+1] + past[-256] + past[+256] + 4) >> 3
		// (offsets are in bytes: +-4 = +-1 entry, +-1024 = +-256 entries)
		"shll $2, %%ecx				\n\t"
		"movl %3, %%ebx				\n\t"
		"addl -4(%%ebx), %%ecx			\n\t"
		"addl 4(%%ebx), %%ecx			\n\t"
		"addl -1024(%%ebx), %%ecx		\n\t"
		"addl $4, %%ecx				\n\t"
		"addl 1024(%%ebx), %%ecx		\n\t"
		"shrl $3, %%ecx				\n\t"
		"movl %%ecx, (%%ebx)			\n\t" // store filtered diff into history
		"leal (%%eax, %2, 2), %%ebx		\n\t" // 5*stride

//	"movl %3, %%ecx				\n\t"
//	"movl %%ecx, test			\n\t"
//	"jmp 4f \n\t"
		// pick a blend strength by comparing against the 3 thresholds
		"cmpl 4+maxTmpNoise, %%ecx		\n\t"
		" jb 2f					\n\t"
		"cmpl 8+maxTmpNoise, %%ecx		\n\t"
		" jb 1f					\n\t"

		// diff >= maxTmpNoise[2]: scene change -> copy src into tempBlured
		"leal (%%ebx, %2, 2), %%ecx		\n\t" // 7*stride
		"movq (%0), %%mm0			\n\t" // L0
		"movq (%0, %2), %%mm1			\n\t" // L1
		"movq (%0, %2, 2), %%mm2		\n\t" // L2
		"movq (%0, %%eax), %%mm3		\n\t" // L3
		"movq (%0, %2, 4), %%mm4		\n\t" // L4
		"movq (%0, %%ebx), %%mm5		\n\t" // L5
		"movq (%0, %%eax, 2), %%mm6		\n\t" // L6
		"movq (%0, %%ecx), %%mm7		\n\t" // L7
		"movq %%mm0, (%1)			\n\t" // L0
		"movq %%mm1, (%1, %2)			\n\t" // L1
		"movq %%mm2, (%1, %2, 2)		\n\t" // L2
		"movq %%mm3, (%1, %%eax)		\n\t" // L3
		"movq %%mm4, (%1, %2, 4)		\n\t" // L4
		"movq %%mm5, (%1, %%ebx)		\n\t" // L5
		"movq %%mm6, (%1, %%eax, 2)		\n\t" // L6
		"movq %%mm7, (%1, %%ecx)		\n\t" // L7
		"jmp 4f					\n\t"

		"1:					\n\t" // 1:1 blend (avg of old and new)
		"leal (%%ebx, %2, 2), %%ecx		\n\t" // 7*stride
		"movq (%0), %%mm0			\n\t" // L0
		"pavgb (%1), %%mm0			\n\t" // L0
		"movq (%0, %2), %%mm1			\n\t" // L1
		"pavgb (%1, %2), %%mm1			\n\t" // L1
		"movq (%0, %2, 2), %%mm2		\n\t" // L2
		"pavgb (%1, %2, 2), %%mm2		\n\t" // L2
		"movq (%0, %%eax), %%mm3		\n\t" // L3
		"pavgb (%1, %%eax), %%mm3		\n\t" // L3
		"movq (%0, %2, 4), %%mm4		\n\t" // L4
		"pavgb (%1, %2, 4), %%mm4		\n\t" // L4
		"movq (%0, %%ebx), %%mm5		\n\t" // L5
		"pavgb (%1, %%ebx), %%mm5		\n\t" // L5
		"movq (%0, %%eax, 2), %%mm6		\n\t" // L6
		"pavgb (%1, %%eax, 2), %%mm6		\n\t" // L6
		"movq (%0, %%ecx), %%mm7		\n\t" // L7
		"pavgb (%1, %%ecx), %%mm7		\n\t" // L7
		"movq %%mm0, (%1)			\n\t" // R0
		"movq %%mm1, (%1, %2)			\n\t" // R1
		"movq %%mm2, (%1, %2, 2)		\n\t" // R2
		"movq %%mm3, (%1, %%eax)		\n\t" // R3
		"movq %%mm4, (%1, %2, 4)		\n\t" // R4
		"movq %%mm5, (%1, %%ebx)		\n\t" // R5
		"movq %%mm6, (%1, %%eax, 2)		\n\t" // R6
		"movq %%mm7, (%1, %%ecx)		\n\t" // R7
		"movq %%mm0, (%0)			\n\t" // L0
		"movq %%mm1, (%0, %2)			\n\t" // L1
		"movq %%mm2, (%0, %2, 2)		\n\t" // L2
		"movq %%mm3, (%0, %%eax)		\n\t" // L3
		"movq %%mm4, (%0, %2, 4)		\n\t" // L4
		"movq %%mm5, (%0, %%ebx)		\n\t" // L5
		"movq %%mm6, (%0, %%eax, 2)		\n\t" // L6
		"movq %%mm7, (%0, %%ecx)		\n\t" // L7
		"jmp 4f					\n\t"

		"2:					\n\t"
		"cmpl maxTmpNoise, %%ecx		\n\t"
		" jb 3f					\n\t"

		// maxTmpNoise[0] <= diff < maxTmpNoise[1]: ~3:1 blend via 2x PAVGB
		"leal (%%ebx, %2, 2), %%ecx		\n\t" // 7*stride
		"movq (%0), %%mm0			\n\t" // L0
		"movq (%0, %2), %%mm1			\n\t" // L1
		"movq (%0, %2, 2), %%mm2		\n\t" // L2
		"movq (%0, %%eax), %%mm3		\n\t" // L3
		"movq (%1), %%mm4			\n\t" // R0
		"movq (%1, %2), %%mm5			\n\t" // R1
		"movq (%1, %2, 2), %%mm6		\n\t" // R2
		"movq (%1, %%eax), %%mm7		\n\t" // R3
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		"movq %%mm0, (%1)			\n\t" // R0
		"movq %%mm1, (%1, %2)			\n\t" // R1
		"movq %%mm2, (%1, %2, 2)		\n\t" // R2
		"movq %%mm3, (%1, %%eax)		\n\t" // R3
		"movq %%mm0, (%0)			\n\t" // L0
		"movq %%mm1, (%0, %2)			\n\t" // L1
		"movq %%mm2, (%0, %2, 2)		\n\t" // L2
		"movq %%mm3, (%0, %%eax)		\n\t" // L3

		"movq (%0, %2, 4), %%mm0		\n\t" // L4
		"movq (%0, %%ebx), %%mm1		\n\t" // L5
		"movq (%0, %%eax, 2), %%mm2		\n\t" // L6
		"movq (%0, %%ecx), %%mm3		\n\t" // L7
		"movq (%1, %2, 4), %%mm4		\n\t" // R4
		"movq (%1, %%ebx), %%mm5		\n\t" // R5
		"movq (%1, %%eax, 2), %%mm6		\n\t" // R6
		"movq (%1, %%ecx), %%mm7		\n\t" // R7
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		"movq %%mm0, (%1, %2, 4)		\n\t" // R4
		"movq %%mm1, (%1, %%ebx)		\n\t" // R5
		"movq %%mm2, (%1, %%eax, 2)		\n\t" // R6
		"movq %%mm3, (%1, %%ecx)		\n\t" // R7
		"movq %%mm0, (%0, %2, 4)		\n\t" // L4
		"movq %%mm1, (%0, %%ebx)		\n\t" // L5
		"movq %%mm2, (%0, %%eax, 2)		\n\t" // L6
		"movq %%mm3, (%0, %%ecx)		\n\t" // L7
		"jmp 4f					\n\t"

		"3:					\n\t" // diff < maxTmpNoise[0]: ~7:1 blend via 3x PAVGB
		"leal (%%ebx, %2, 2), %%ecx		\n\t" // 7*stride
		"movq (%0), %%mm0			\n\t" // L0
		"movq (%0, %2), %%mm1			\n\t" // L1
		"movq (%0, %2, 2), %%mm2		\n\t" // L2
		"movq (%0, %%eax), %%mm3		\n\t" // L3
		"movq (%1), %%mm4			\n\t" // R0
		"movq (%1, %2), %%mm5			\n\t" // R1
		"movq (%1, %2, 2), %%mm6		\n\t" // R2
		"movq (%1, %%eax), %%mm7		\n\t" // R3
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		"movq %%mm0, (%1)			\n\t" // R0
		"movq %%mm1, (%1, %2)			\n\t" // R1
		"movq %%mm2, (%1, %2, 2)		\n\t" // R2
		"movq %%mm3, (%1, %%eax)		\n\t" // R3
		"movq %%mm0, (%0)			\n\t" // L0
		"movq %%mm1, (%0, %2)			\n\t" // L1
		"movq %%mm2, (%0, %2, 2)		\n\t" // L2
		"movq %%mm3, (%0, %%eax)		\n\t" // L3

		"movq (%0, %2, 4), %%mm0		\n\t" // L4
		"movq (%0, %%ebx), %%mm1		\n\t" // L5
		"movq (%0, %%eax, 2), %%mm2		\n\t" // L6
		"movq (%0, %%ecx), %%mm3		\n\t" // L7
		"movq (%1, %2, 4), %%mm4		\n\t" // R4
		"movq (%1, %%ebx), %%mm5		\n\t" // R5
		"movq (%1, %%eax, 2), %%mm6		\n\t" // R6
		"movq (%1, %%ecx), %%mm7		\n\t" // R7
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		"movq %%mm0, (%1, %2, 4)		\n\t" // R4
		"movq %%mm1, (%1, %%ebx)		\n\t" // R5
		"movq %%mm2, (%1, %%eax, 2)		\n\t" // R6
		"movq %%mm3, (%1, %%ecx)		\n\t" // R7
		"movq %%mm0, (%0, %2, 4)		\n\t" // L4
		"movq %%mm1, (%0, %%ebx)		\n\t" // L5
		"movq %%mm2, (%0, %%eax, 2)		\n\t" // L6
		"movq %%mm3, (%0, %%ecx)		\n\t" // L7

		"4:					\n\t"

	:: "r" (src), "r" (tempBlured), "r"(stride), "m" (tempBluredPast)
	: "%eax", "%ebx", "%ecx", "memory"
	);
//printf("%d\n", test);
#else
	int y;
	int d=0;        // sum of squared differences of the block
	int sysd=0;     // NOTE(review): signed sum of differences, computed but never used
	int i;

	for(y=0; y<8; y++)
	{
		int x;
		for(x=0; x<8; x++)
		{
			int ref= tempBlured[ x + y*stride ];
			int cur= src[ x + y*stride ];
			int d1=ref - cur;
//			if(x==0 || x==7) d1+= d1>>1;
//			if(y==0 || y==7) d1+= d1>>1;
//			d+= ABS(d1);
			d+= d1*d1;
			sysd+= d1;
		}
	}
	i=d;
	// low-pass the difference with the 4 neighbouring history entries
	// (+-1 and +-256; presumably neighbouring blocks -- see header note)
	d=	(
		4*d
		+(*(tempBluredPast-256))
		+(*(tempBluredPast-1))+ (*(tempBluredPast+1))
		+(*(tempBluredPast+256))
		+4)>>3;
	// NOTE(review): stores the raw SSD, unlike the MMX path which stores
	// the filtered value -- see header note
	*tempBluredPast=i;
//	((*tempBluredPast)*3 + d + 2)>>2;

//printf("%d %d %d\n", maxNoise[0], maxNoise[1], maxNoise[2]);
/*
Switch between
 1  0  0  0  0  0  0  (0)
64 32 16  8  4  2  1  (1)
64 48 36 27 20 15 11  (33) (approx)
64 56 49 43 37 33 29  (200) (approx)
*/
	if(d > maxNoise[1])
	{
		if(d < maxNoise[2])
		{
			// medium difference: 1:1 blend
			for(y=0; y<8; y++)
			{
				int x;
				for(x=0; x<8; x++)
				{
					int ref= tempBlured[ x + y*stride ];
					int cur= src[ x + y*stride ];
					tempBlured[ x + y*stride ]=
					src[ x + y*stride ]=
						(ref + cur + 1)>>1;
				}
			}
		}
		else
		{
			// large difference (scene change): take the new block
			for(y=0; y<8; y++)
			{
				int x;
				for(x=0; x<8; x++)
				{
					tempBlured[ x + y*stride ]= src[ x + y*stride ];
				}
			}
		}
	}
	else
	{
		if(d < maxNoise[0])
		{
			// very small difference: strong smoothing, 7:1 toward the old block
			for(y=0; y<8; y++)
			{
				int x;
				for(x=0; x<8; x++)
				{
					int ref= tempBlured[ x + y*stride ];
					int cur= src[ x + y*stride ];
					tempBlured[ x + y*stride ]=
					src[ x + y*stride ]=
						(ref*7 + cur + 4)>>3;
				}
			}
		}
		else
		{
			// small difference: 3:1 toward the old block
			for(y=0; y<8; y++)
			{
				int x;
				for(x=0; x<8; x++)
				{
					int ref= tempBlured[ x + y*stride ];
					int cur= src[ x + y*stride ];
					tempBlured[ x + y*stride ]=
					src[ x + y*stride ]=
						(ref*3 + cur + 2)>>2;
				}
			}
		}
	}
#endif
}
  3000. #ifdef HAVE_ODIVX_POSTPROCESS
  3001. #include "../opendivx/postprocess.h"
  3002. int use_old_pp=0;
  3003. #endif
  3004. static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
  3005. QP_STORE_T QPs[], int QPStride, int isColor, struct PPMode *ppMode);
  3006. /* -pp Command line Help
  3007. NOTE/FIXME: put this at an appropriate place (--help, html docs, man mplayer)?
  3008. -pp <filterName>[:<option>[:<option>...]][,[-]<filterName>[:<option>...]]...
  3009. long form example:
  3010. -pp vdeblock:autoq,hdeblock:autoq,linblenddeint -pp default,-vdeblock
  3011. short form example:
  3012. -pp vb:a,hb:a,lb -pp de,-vb
  3013. more examples:
  3014. -pp tn:64:128:256
  3015. Filters Options
  3016. short long name short long option Description
* * a autoq cpu power dependent enabler
c chrom chrominance filtering enabled
y nochrom chrominance filtering disabled
  3020. hb hdeblock horizontal deblocking filter
  3021. vb vdeblock vertical deblocking filter
  3022. vr rkvdeblock
  3023. h1 x1hdeblock Experimental horizontal deblock filter 1
  3024. v1 x1vdeblock Experimental vertical deblock filter 1
  3025. dr dering not implemented yet
  3026. al autolevels automatic brightness / contrast fixer
  3027. f fullyrange stretch luminance range to (0..255)
  3028. lb linblenddeint linear blend deinterlacer
  3029. li linipoldeint linear interpolating deinterlacer
  3030. ci cubicipoldeint cubic interpolating deinterlacer
  3031. md mediandeint median deinterlacer
  3032. de default hdeblock:a,vdeblock:a,dering:a,autolevels
  3033. fa fast x1hdeblock:a,x1vdeblock:a,dering:a,autolevels
  3034. tn tmpnoise (3 Thresholds) Temporal Noise Reducer
  3035. */
  3036. /**
  3037. * returns a PPMode struct which will have a non 0 error variable if an error occured
  3038. * name is the string after "-pp" on the command line
  3039. * quality is a number from 0 to GET_PP_QUALITY_MAX
  3040. */
  3041. struct PPMode getPPModeByNameAndQuality(char *name, int quality)
  3042. {
  3043. char temp[GET_MODE_BUFFER_SIZE];
  3044. char *p= temp;
  3045. char *filterDelimiters= ",";
  3046. char *optionDelimiters= ":";
  3047. struct PPMode ppMode= {0,0,0,0,0,0,{150,200,400}};
  3048. char *filterToken;
  3049. strncpy(temp, name, GET_MODE_BUFFER_SIZE);
  3050. printf("%s\n", name);
  3051. for(;;){
  3052. char *filterName;
  3053. int q= 1000000; //GET_PP_QUALITY_MAX;
  3054. int chrom=-1;
  3055. char *option;
  3056. char *options[OPTIONS_ARRAY_SIZE];
  3057. int i;
  3058. int filterNameOk=0;
  3059. int numOfUnknownOptions=0;
  3060. int enable=1; //does the user want us to enabled or disabled the filter
  3061. filterToken= strtok(p, filterDelimiters);
  3062. if(filterToken == NULL) break;
  3063. p+= strlen(filterToken) + 1; // p points to next filterToken
  3064. filterName= strtok(filterToken, optionDelimiters);
  3065. printf("%s::%s\n", filterToken, filterName);
  3066. if(*filterName == '-')
  3067. {
  3068. enable=0;
  3069. filterName++;
  3070. }
  3071. for(;;){ //for all options
  3072. option= strtok(NULL, optionDelimiters);
  3073. if(option == NULL) break;
  3074. printf("%s\n", option);
  3075. if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
  3076. else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
  3077. else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
  3078. else
  3079. {
  3080. options[numOfUnknownOptions] = option;
  3081. numOfUnknownOptions++;
  3082. }
  3083. if(numOfUnknownOptions >= OPTIONS_ARRAY_SIZE-1) break;
  3084. }
  3085. options[numOfUnknownOptions] = NULL;
  3086. /* replace stuff from the replace Table */
  3087. for(i=0; replaceTable[2*i]!=NULL; i++)
  3088. {
  3089. if(!strcmp(replaceTable[2*i], filterName))
  3090. {
  3091. int newlen= strlen(replaceTable[2*i + 1]);
  3092. int plen;
  3093. int spaceLeft;
  3094. if(p==NULL) p= temp, *p=0; //last filter
  3095. else p--, *p=','; //not last filter
  3096. plen= strlen(p);
  3097. spaceLeft= (int)p - (int)temp + plen;
  3098. if(spaceLeft + newlen >= GET_MODE_BUFFER_SIZE)
  3099. {
  3100. ppMode.error++;
  3101. break;
  3102. }
  3103. memmove(p + newlen, p, plen+1);
  3104. memcpy(p, replaceTable[2*i + 1], newlen);
  3105. filterNameOk=1;
  3106. }
  3107. }
  3108. for(i=0; filters[i].shortName!=NULL; i++)
  3109. {
  3110. // printf("Compareing %s, %s, %s\n", filters[i].shortName,filters[i].longName, filterName);
  3111. if( !strcmp(filters[i].longName, filterName)
  3112. || !strcmp(filters[i].shortName, filterName))
  3113. {
  3114. ppMode.lumMode &= ~filters[i].mask;
  3115. ppMode.chromMode &= ~filters[i].mask;
  3116. filterNameOk=1;
  3117. if(!enable) break; // user wants to disable it
  3118. if(q >= filters[i].minLumQuality)
  3119. ppMode.lumMode|= filters[i].mask;
  3120. if(chrom==1 || (chrom==-1 && filters[i].chromDefault))
  3121. if(q >= filters[i].minChromQuality)
  3122. ppMode.chromMode|= filters[i].mask;
  3123. if(filters[i].mask == LEVEL_FIX)
  3124. {
  3125. int o;
  3126. ppMode.minAllowedY= 16;
  3127. ppMode.maxAllowedY= 234;
  3128. for(o=0; options[o]!=NULL; o++)
  3129. if( !strcmp(options[o],"fullyrange")
  3130. ||!strcmp(options[o],"f"))
  3131. {
  3132. ppMode.minAllowedY= 0;
  3133. ppMode.maxAllowedY= 255;
  3134. numOfUnknownOptions--;
  3135. }
  3136. }
  3137. else if(filters[i].mask == TEMP_NOISE_FILTER)
  3138. {
  3139. int o;
  3140. int numOfNoises=0;
  3141. ppMode.maxTmpNoise[0]= 150;
  3142. ppMode.maxTmpNoise[1]= 200;
  3143. ppMode.maxTmpNoise[2]= 400;
  3144. for(o=0; options[o]!=NULL; o++)
  3145. {
  3146. char *tail;
  3147. ppMode.maxTmpNoise[numOfNoises]=
  3148. strtol(options[o], &tail, 0);
  3149. if(tail!=options[o])
  3150. {
  3151. numOfNoises++;
  3152. numOfUnknownOptions--;
  3153. if(numOfNoises >= 3) break;
  3154. }
  3155. }
  3156. }
  3157. }
  3158. }
  3159. if(!filterNameOk) ppMode.error++;
  3160. ppMode.error += numOfUnknownOptions;
  3161. }
  3162. #ifdef HAVE_ODIVX_POSTPROCESS
  3163. if(ppMode.lumMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_H;
  3164. if(ppMode.lumMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_V;
  3165. if(ppMode.chromMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_H;
  3166. if(ppMode.chromMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_V;
  3167. if(ppMode.lumMode & DERING) ppMode.oldMode |= PP_DERING_Y;
  3168. if(ppMode.chromMode & DERING) ppMode.oldMode |= PP_DERING_C;
  3169. #endif
  3170. return ppMode;
  3171. }
  3172. /**
  3173. * Obsolete, dont use it, use postprocess2() instead
  3174. */
  3175. void postprocess(unsigned char * src[], int src_stride,
  3176. unsigned char * dst[], int dst_stride,
  3177. int horizontal_size, int vertical_size,
  3178. QP_STORE_T *QP_store, int QP_stride,
  3179. int mode)
  3180. {
  3181. struct PPMode ppMode;
  3182. static QP_STORE_T zeroArray[2048/8];
  3183. /*
  3184. static int qual=0;
  3185. ppMode= getPPModeByNameAndQuality("fast,default,-hdeblock,-vdeblock,tmpnoise:150:200:300", qual);
  3186. printf("OK\n");
  3187. qual++;
  3188. qual%=7;
  3189. printf("\n%X %X %X %X :%d: %d %d %d\n", ppMode.lumMode, ppMode.chromMode, ppMode.oldMode, ppMode.error,
  3190. qual, ppMode.maxTmpNoise[0], ppMode.maxTmpNoise[1], ppMode.maxTmpNoise[2]);
  3191. postprocess2(src, src_stride, dst, dst_stride,
  3192. horizontal_size, vertical_size, QP_store, QP_stride, &ppMode);
  3193. return;
  3194. */
  3195. if(QP_store==NULL)
  3196. {
  3197. QP_store= zeroArray;
  3198. QP_stride= 0;
  3199. }
  3200. ppMode.lumMode= mode;
  3201. mode= ((mode&0xFF)>>4) | (mode&0xFFFFFF00);
  3202. ppMode.chromMode= mode;
  3203. ppMode.maxTmpNoise[0]= 700;
  3204. ppMode.maxTmpNoise[1]= 1500;
  3205. ppMode.maxTmpNoise[2]= 3000;
  3206. #ifdef HAVE_ODIVX_POSTPROCESS
  3207. // Note: I could make this shit outside of this file, but it would mean one
  3208. // more function call...
  3209. if(use_old_pp){
  3210. odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,mode);
  3211. return;
  3212. }
  3213. #endif
  3214. postProcess(src[0], src_stride, dst[0], dst_stride,
  3215. horizontal_size, vertical_size, QP_store, QP_stride, 0, &ppMode);
  3216. horizontal_size >>= 1;
  3217. vertical_size >>= 1;
  3218. src_stride >>= 1;
  3219. dst_stride >>= 1;
  3220. // mode&= ~(LINEAR_IPOL_DEINT_FILTER | LINEAR_BLEND_DEINT_FILTER |
  3221. // MEDIAN_DEINT_FILTER | CUBIC_IPOL_DEINT_FILTER);
  3222. if(1)
  3223. {
  3224. postProcess(src[1], src_stride, dst[1], dst_stride,
  3225. horizontal_size, vertical_size, QP_store, QP_stride, 1, &ppMode);
  3226. postProcess(src[2], src_stride, dst[2], dst_stride,
  3227. horizontal_size, vertical_size, QP_store, QP_stride, 2, &ppMode);
  3228. }
  3229. else
  3230. {
  3231. memset(dst[1], 128, dst_stride*vertical_size);
  3232. memset(dst[2], 128, dst_stride*vertical_size);
  3233. // memcpy(dst[1], src[1], src_stride*horizontal_size);
  3234. // memcpy(dst[2], src[2], src_stride*horizontal_size);
  3235. }
  3236. }
  3237. void postprocess2(unsigned char * src[], int src_stride,
  3238. unsigned char * dst[], int dst_stride,
  3239. int horizontal_size, int vertical_size,
  3240. QP_STORE_T *QP_store, int QP_stride,
  3241. struct PPMode *mode)
  3242. {
  3243. static QP_STORE_T zeroArray[2048/8];
  3244. if(QP_store==NULL)
  3245. {
  3246. QP_store= zeroArray;
  3247. QP_stride= 0;
  3248. }
  3249. #ifdef HAVE_ODIVX_POSTPROCESS
  3250. // Note: I could make this shit outside of this file, but it would mean one
  3251. // more function call...
  3252. if(use_old_pp){
  3253. odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,
  3254. mode->oldMode);
  3255. return;
  3256. }
  3257. #endif
  3258. postProcess(src[0], src_stride, dst[0], dst_stride,
  3259. horizontal_size, vertical_size, QP_store, QP_stride, 0, mode);
  3260. horizontal_size >>= 1;
  3261. vertical_size >>= 1;
  3262. src_stride >>= 1;
  3263. dst_stride >>= 1;
  3264. postProcess(src[1], src_stride, dst[1], dst_stride,
  3265. horizontal_size, vertical_size, QP_store, QP_stride, 1, mode);
  3266. postProcess(src[2], src_stride, dst[2], dst_stride,
  3267. horizontal_size, vertical_size, QP_store, QP_stride, 2, mode);
  3268. }
  3269. /**
  3270. * gets the mode flags for a given quality (larger values mean slower but better postprocessing)
  3271. * 0 <= quality <= 6
  3272. */
  3273. int getPpModeForQuality(int quality){
  3274. int modes[1+GET_PP_QUALITY_MAX]= {
  3275. 0,
  3276. #if 1
  3277. // horizontal filters first
  3278. LUM_H_DEBLOCK,
  3279. LUM_H_DEBLOCK | LUM_V_DEBLOCK,
  3280. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK,
  3281. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK,
  3282. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING,
  3283. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING | CHROM_DERING
  3284. #else
  3285. // vertical filters first
  3286. LUM_V_DEBLOCK,
  3287. LUM_V_DEBLOCK | LUM_H_DEBLOCK,
  3288. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK,
  3289. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK,
  3290. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING,
  3291. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING | CHROM_DERING
  3292. #endif
  3293. };
  3294. #ifdef HAVE_ODIVX_POSTPROCESS
  3295. int odivx_modes[1+GET_PP_QUALITY_MAX]= {
  3296. 0,
  3297. PP_DEBLOCK_Y_H,
  3298. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V,
  3299. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H,
  3300. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V,
  3301. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y,
  3302. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y|PP_DERING_C
  3303. };
  3304. if(use_old_pp) return odivx_modes[quality];
  3305. #endif
  3306. return modes[quality];
  3307. }
  3308. /**
  3309. * Copies a block from src to dst and fixes the blacklevel
  3310. * numLines must be a multiple of 4
  3311. * levelFix == 0 -> dont touch the brighness & contrast
  3312. */
  3313. static inline void blockCopy(uint8_t dst[], int dstStride, uint8_t src[], int srcStride,
  3314. int numLines, int levelFix)
  3315. {
  3316. #ifndef HAVE_MMX
  3317. int i;
  3318. #endif
  3319. if(levelFix)
  3320. {
  3321. #ifdef HAVE_MMX
  3322. asm volatile(
  3323. "leal (%2,%2), %%eax \n\t"
  3324. "leal (%3,%3), %%ebx \n\t"
  3325. "movq packedYOffset, %%mm2 \n\t"
  3326. "movq packedYScale, %%mm3 \n\t"
  3327. "pxor %%mm4, %%mm4 \n\t"
  3328. #define SCALED_CPY \
  3329. "movq (%0), %%mm0 \n\t"\
  3330. "movq (%0), %%mm5 \n\t"\
  3331. "punpcklbw %%mm4, %%mm0 \n\t"\
  3332. "punpckhbw %%mm4, %%mm5 \n\t"\
  3333. "psubw %%mm2, %%mm0 \n\t"\
  3334. "psubw %%mm2, %%mm5 \n\t"\
  3335. "movq (%0,%2), %%mm1 \n\t"\
  3336. "psllw $6, %%mm0 \n\t"\
  3337. "psllw $6, %%mm5 \n\t"\
  3338. "pmulhw %%mm3, %%mm0 \n\t"\
  3339. "movq (%0,%2), %%mm6 \n\t"\
  3340. "pmulhw %%mm3, %%mm5 \n\t"\
  3341. "punpcklbw %%mm4, %%mm1 \n\t"\
  3342. "punpckhbw %%mm4, %%mm6 \n\t"\
  3343. "psubw %%mm2, %%mm1 \n\t"\
  3344. "psubw %%mm2, %%mm6 \n\t"\
  3345. "psllw $6, %%mm1 \n\t"\
  3346. "psllw $6, %%mm6 \n\t"\
  3347. "pmulhw %%mm3, %%mm1 \n\t"\
  3348. "pmulhw %%mm3, %%mm6 \n\t"\
  3349. "addl %%eax, %0 \n\t"\
  3350. "packuswb %%mm5, %%mm0 \n\t"\
  3351. "packuswb %%mm6, %%mm1 \n\t"\
  3352. "movq %%mm0, (%1) \n\t"\
  3353. "movq %%mm1, (%1, %3) \n\t"\
  3354. SCALED_CPY
  3355. "addl %%ebx, %1 \n\t"
  3356. SCALED_CPY
  3357. "addl %%ebx, %1 \n\t"
  3358. SCALED_CPY
  3359. "addl %%ebx, %1 \n\t"
  3360. SCALED_CPY
  3361. : "+r"(src),
  3362. "+r"(dst)
  3363. :"r" (srcStride),
  3364. "r" (dstStride)
  3365. : "%eax", "%ebx"
  3366. );
  3367. #else
  3368. for(i=0; i<numLines; i++)
  3369. memcpy( &(dst[dstStride*i]),
  3370. &(src[srcStride*i]), BLOCK_SIZE);
  3371. #endif
  3372. }
  3373. else
  3374. {
  3375. #ifdef HAVE_MMX
  3376. asm volatile(
  3377. "movl %4, %%eax \n\t"
  3378. "movl %%eax, temp0\n\t"
  3379. "pushl %0 \n\t"
  3380. "pushl %1 \n\t"
  3381. "leal (%2,%2), %%eax \n\t"
  3382. "leal (%3,%3), %%ebx \n\t"
  3383. "movq packedYOffset, %%mm2 \n\t"
  3384. "movq packedYScale, %%mm3 \n\t"
  3385. #define SIMPLE_CPY \
  3386. "movq (%0), %%mm0 \n\t"\
  3387. "movq (%0,%2), %%mm1 \n\t"\
  3388. "movq %%mm0, (%1) \n\t"\
  3389. "movq %%mm1, (%1, %3) \n\t"\
  3390. "1: \n\t"
  3391. SIMPLE_CPY
  3392. "addl %%eax, %0 \n\t"
  3393. "addl %%ebx, %1 \n\t"
  3394. SIMPLE_CPY
  3395. "addl %%eax, %0 \n\t"
  3396. "addl %%ebx, %1 \n\t"
  3397. "decl temp0 \n\t"
  3398. "jnz 1b \n\t"
  3399. "popl %1 \n\t"
  3400. "popl %0 \n\t"
  3401. : : "r" (src),
  3402. "r" (dst),
  3403. "r" (srcStride),
  3404. "r" (dstStride),
  3405. "m" (numLines>>2)
  3406. : "%eax", "%ebx"
  3407. );
  3408. #else
  3409. for(i=0; i<numLines; i++)
  3410. memcpy( &(dst[dstStride*i]),
  3411. &(src[srcStride*i]), BLOCK_SIZE);
  3412. #endif
  3413. }
  3414. }
  3415. /**
  3416. * Filters array of bytes (Y or U or V values)
  3417. */
/*
 * Filters one picture plane (Y, U or V): copies src into dst block by block
 * (8x8, BLOCK_SIZE) while applying the deblocking / deinterlacing / dering /
 * temporal-noise filters selected by the mode bits in ppMode.
 *
 * src, srcStride   source plane and its line stride in bytes
 * dst, dstStride   destination plane and its line stride in bytes
 * width, height    plane dimensions in pixels
 * QPs, QPStride    per-macroblock quantizer array and its row stride
 * isColor          0 for the luma plane, nonzero for chroma; also indexes
 *                  the temporal buffers (presumably 0=Y, 1=U, 2=V — set by
 *                  the caller, verify)
 * ppMode           filter selection bits and noise thresholds
 *
 * Side effects: updates the file-scope MMX operands packedYScale,
 * packedYOffset, pQPb and maxTmpNoise, and the per-call static buffers.
 * NOTE(review): the function-local static state makes this non-reentrant
 * and not thread-safe; the static buffers are allocated once and never
 * freed (deliberate one-shot allocation, see FIXME below).
 */
static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
	QP_STORE_T QPs[], int QPStride, int isColor, struct PPMode *ppMode)
{
	int x,y;
	const int mode= isColor ? ppMode->chromMode : ppMode->lumMode;
	/* we need 64 bit here, otherwise we'll have an overflow problem
	   after watching a black picture for 5 hours */
	static uint64_t *yHistogram= NULL;
	int black=0, white=255; // blackest black and whitest white in the picture
	int QPCorrecture= 256;  // QP scale factor, 8.8 fixed point (256 == 1.0)

	/* Temporary buffers for handling the last row(s) */
	static uint8_t *tempDst= NULL;
	static uint8_t *tempSrc= NULL;

	/* Temporary buffers for handling the last block (PP_FUNNY_STRIDE only) */
	static uint8_t *tempDstBlock= NULL;
	static uint8_t *tempSrcBlock= NULL;

	/* Temporal noise reducing buffers, one set per plane (indexed by isColor) */
	static uint8_t *tempBlured[3]= {NULL,NULL,NULL};
	static uint32_t *tempBluredPast[3]= {NULL,NULL,NULL};

#ifdef PP_FUNNY_STRIDE
	/* saved real-block pointers while the temp-block buffers are in use */
	uint8_t *dstBlockPtrBackup;
	uint8_t *srcBlockPtrBackup;
#endif

#ifdef MORE_TIMING
	long long T0, T1, diffTime=0;
#endif
#ifdef TIMING
	long long memcpyTime=0, vertTime=0, horizTime=0, sumTime;
	sumTime= rdtsc();
#endif
//mode= 0x7F;

#ifdef HAVE_MMX
	/* copy the per-mode noise thresholds into the global MMX operands */
	maxTmpNoise[0]= ppMode->maxTmpNoise[0];
	maxTmpNoise[1]= ppMode->maxTmpNoise[1];
	maxTmpNoise[2]= ppMode->maxTmpNoise[2];
#endif

	if(tempDst==NULL)
	{
		/* one-time allocation; NOTE(review): memalign results are not
		   checked for NULL */
		tempDst= (uint8_t*)memalign(8, 1024*24);
		tempSrc= (uint8_t*)memalign(8, 1024*24);
		tempDstBlock= (uint8_t*)memalign(8, 1024*24);
		tempSrcBlock= (uint8_t*)memalign(8, 1024*24);
	}

	if(tempBlured[isColor]==NULL && (mode & TEMP_NOISE_FILTER))
	{
//		printf("%d %d %d\n", isColor, dstStride, height);
		//FIXME works only as long as the size doesn't increase
		//Note: the +17*1024 is just there so I don't have to worry about r/w over the end
		/* height is rounded up to a multiple of 8 ((height+7)&~7) */
		tempBlured[isColor]= (uint8_t*)memalign(8, dstStride*((height+7)&(~7)) + 17*1024);
		tempBluredPast[isColor]= (uint32_t*)memalign(8, 256*((height+7)&(~7))/2 + 17*1024);

		memset(tempBlured[isColor], 0, dstStride*((height+7)&(~7)) + 17*1024);
		memset(tempBluredPast[isColor], 0, 256*((height+7)&(~7))/2 + 17*1024);
	}

	if(!yHistogram)
	{
		int i;
		yHistogram= (uint64_t*)malloc(8*256);
		/* seed with a flat distribution so the very first frames get a
		   sane black/white estimate */
		for(i=0; i<256; i++) yHistogram[i]= width*height/64*15/256;

		if(mode & FULL_Y_RANGE)
		{
			maxAllowedY=255;
			minAllowedY=0;
		}
	}

	if(!isColor)
	{
		/* luma only: derive the black/white levels from the running
		   histogram and pack offset/scale for the MMX level fix */
		uint64_t sum= 0;
		int i;
		static int framenum= -1;
		uint64_t maxClipped;
		uint64_t clipped;
		double scale;

		framenum++;
		if(framenum == 1) yHistogram[0]= width*height/64*15/256;

		for(i=0; i<256; i++)
		{
			sum+= yHistogram[i];
//			printf("%d ", yHistogram[i]);
		}
//		printf("\n\n");

		/* we always get a completely black picture first */
		maxClipped= (uint64_t)(sum * maxClippedThreshold);

		/* darkest level that keeps the clipped mass below the threshold */
		clipped= sum;
		for(black=255; black>0; black--)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[black];
		}

		/* same search from the bright end */
		clipped= sum;
		for(white=0; white<256; white++)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[white];
		}

		/* replicate the 16-bit offset into all four lanes of the packed
		   64-bit MMX operand */
		packedYOffset= (black - minAllowedY) & 0xFFFF;
		packedYOffset|= packedYOffset<<32;
		packedYOffset|= packedYOffset<<16;

		/* NOTE(review): divides by zero if white==black — presumably
		   prevented by the seeded histogram; verify */
		scale= (double)(maxAllowedY - minAllowedY) / (double)(white-black);

		/* NOTE(review): luma lanes hold scale*1024 while the chroma
		   identity below is 0x0100 and QPCorrecture reads the low lane
		   as 8.8 fixed point — the factor-of-4 difference looks
		   intentional for the MMX scaler; verify */
		packedYScale= (uint16_t)(scale*1024.0 + 0.5);
		packedYScale|= packedYScale<<32;
		packedYScale|= packedYScale<<16;
	}
	else
	{
		/* chroma: identity scale, no offset */
		packedYScale= 0x0100010001000100LL;
		packedYOffset= 0;
	}

	if(mode & LEVEL_FIX) QPCorrecture= packedYScale &0xFFFF;
	else QPCorrecture= 256;

	/* copy & deinterlace first row of blocks */
	y=-BLOCK_SIZE;
	{
		//1% speedup if these are here instead of the inner loop
		uint8_t *srcBlock= &(src[y*srcStride]);
		uint8_t *dstBlock= &(dst[y*dstStride]);

		/* the first row is produced in tempDst (offset by one line so
		   line -1 is addressable) and copied to dst after the loop */
		dstBlock= tempDst + dstStride;

		// From this point on it is guaranteed that we can read and write 16 lines downward
		// finish 1 block before the next otherwise we might have a problem
		// with the L1 cache of the P4 ... or only a few blocks at a time or something
		for(x=0; x<width; x+=BLOCK_SIZE)
		{
#ifdef HAVE_MMX2
/*
			prefetchnta(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
/*
			prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
*/
			/* prefetch two src lines (non-temporal) and two dst lines
			   a rotating ((x>>2)&6)+8 rows ahead of the current block */
			asm(
				"movl %4, %%eax \n\t"
				"shrl $2, %%eax \n\t"
				"andl $6, %%eax \n\t"
				"addl $8, %%eax \n\t"
				"movl %%eax, %%ebx \n\t"
				"imul %1, %%eax \n\t"
				"imul %3, %%ebx \n\t"
				"prefetchnta 32(%%eax, %0) \n\t"
				"prefetcht0 32(%%ebx, %2) \n\t"
				"addl %1, %%eax \n\t"
				"addl %3, %%ebx \n\t"
				"prefetchnta 32(%%eax, %0) \n\t"
				"prefetcht0 32(%%ebx, %2) \n\t"
			:: "r" (srcBlock), "r" (srcStride), "r" (dstBlock), "r" (dstStride),
			"m" (x)
			: "%eax", "%ebx"
			);

#elif defined(HAVE_3DNOW)
//FIXME check if this is faster on a 3dnow chip or if it's faster without the prefetch or ...
/*			prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
#endif

			/* copy source lines 8..15 of this block column */
			blockCopy(dstBlock + dstStride*8, dstStride,
				srcBlock + srcStride*8, srcStride, 8, mode & LEVEL_FIX);

			/* at most one deinterlacer runs, chosen by this priority order */
			if(mode & LINEAR_IPOL_DEINT_FILTER)
				deInterlaceInterpolateLinear(dstBlock, dstStride);
			else if(mode & LINEAR_BLEND_DEINT_FILTER)
				deInterlaceBlendLinear(dstBlock, dstStride);
			else if(mode & MEDIAN_DEINT_FILTER)
				deInterlaceMedian(dstBlock, dstStride);
			else if(mode & CUBIC_IPOL_DEINT_FILTER)
				deInterlaceInterpolateCubic(dstBlock, dstStride);
/*			else if(mode & CUBIC_BLEND_DEINT_FILTER)
				deInterlaceBlendCubic(dstBlock, dstStride);
*/
			dstBlock+=8;
			srcBlock+=8;
		}
		/* flush the deinterlaced first row from tempDst into dst */
		memcpy(&(dst[y*dstStride]) + 8*dstStride, tempDst + 9*dstStride, 8*dstStride );
	}

	for(y=0; y<height; y+=BLOCK_SIZE)
	{
		//1% speedup if these are here instead of the inner loop
		uint8_t *srcBlock= &(src[y*srcStride]);
		uint8_t *dstBlock= &(dst[y*dstStride]);
#ifdef ARCH_X86
		/* QP lookup via 32-bit fixed-point stepping: each iteration the
		   asm below adds QPDelta to QPFrac and the carry-out advances
		   QPptr by one int (chroma QP per 8 pixels, luma per 16).
		   NOTE(review): the wrap rate looks coarser than the scalar
		   path's (x>>3)/(x>>4) indexing — verify equivalence. */
		int *QPptr= isColor ? &QPs[(y>>3)*QPStride] :&QPs[(y>>4)*QPStride];
		int QPDelta= isColor ? 1<<(32-3) : 1<<(32-4);
		int QPFrac= QPDelta;
		/* ping-pong 8x16 transpose buffers for the horizontal filters */
		uint8_t *tempBlock1= tempBlocks;
		uint8_t *tempBlock2= tempBlocks + 8;
#endif
		int QP=0;
		/* can we mess with an 8x16 block from srcBlock/dstBlock downwards and 1 line upwards?
		   if not, then use a temporary buffer */
		if(y+15 >= height)
		{
			int i;
			/* copy from line 8 to 15 of src, these will be copied with
			   blockcopy to dst later */
			memcpy(tempSrc + srcStride*8, srcBlock + srcStride*8,
				srcStride*MAX(height-y-8, 0) );
			/* duplicate last line of src to fill the void up to line 15 */
			for(i=MAX(height-y, 8); i<=15; i++)
				memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), srcStride);

			/* copy up to 9 lines of dst (line -1 to 7) */
			memcpy(tempDst, dstBlock - dstStride, dstStride*MIN(height-y+1, 9) );

			/* duplicate last line of dst to fill the void up to line 8 */
			for(i=height-y+1; i<=8; i++)
				memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), dstStride);

			dstBlock= tempDst + dstStride;
			srcBlock= tempSrc;
		}

		// From this point on it is guaranteed that we can read and write 16 lines downward
		// finish 1 block before the next otherwise we might have a problem
		// with the L1 cache of the P4 ... or only a few blocks at a time or something
		for(x=0; x<width; x+=BLOCK_SIZE)
		{
			const int stride= dstStride;
			uint8_t *tmpXchg;
#ifdef ARCH_X86
			QP= *QPptr;
			/* QPFrac += QPDelta; sbbl turns the carry into 0/-1, shifted
			   to 0/-4, subtracted from QPptr: i.e. QPptr advances by one
			   int exactly when QPFrac wraps */
			asm volatile(
				"addl %2, %1 \n\t"
				"sbbl %%eax, %%eax \n\t"
				"shll $2, %%eax \n\t"
				"subl %%eax, %0 \n\t"
				: "+r" (QPptr), "+m" (QPFrac)
				: "r" (QPDelta)
				: "%eax"
			);
#else
			/* scalar path: one QP per 8x8 (chroma) / 16x16 (luma) block */
			QP= isColor ?
				QPs[(y>>3)*QPStride + (x>>3)]:
				QPs[(y>>4)*QPStride + (x>>4)];
#endif
			if(!isColor)
			{
				/* apply the level-fix QP correction (8.8 fixed point)
				   and sample one pixel per block for the histogram */
				QP= (QP* QPCorrecture)>>8;
				yHistogram[ srcBlock[srcStride*12 + 4] ]++;
			}
#ifdef HAVE_MMX
			/* broadcast QP into all 8 bytes of the pQPb MMX operand */
			asm volatile(
				"movd %0, %%mm7 \n\t"
				"packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP
				"packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP
				"packuswb %%mm7, %%mm7 \n\t" // QP,..., QP
				"movq %%mm7, pQPb \n\t"
				: : "r" (QP)
			);
#endif
#ifdef MORE_TIMING
			T0= rdtsc();
#endif

#ifdef HAVE_MMX2
/*
			prefetchnta(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
/*
			prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
*/
			/* same prefetch scheme as in the first-row loop above */
			asm(
				"movl %4, %%eax \n\t"
				"shrl $2, %%eax \n\t"
				"andl $6, %%eax \n\t"
				"addl $8, %%eax \n\t"
				"movl %%eax, %%ebx \n\t"
				"imul %1, %%eax \n\t"
				"imul %3, %%ebx \n\t"
				"prefetchnta 32(%%eax, %0) \n\t"
				"prefetcht0 32(%%ebx, %2) \n\t"
				"addl %1, %%eax \n\t"
				"addl %3, %%ebx \n\t"
				"prefetchnta 32(%%eax, %0) \n\t"
				"prefetcht0 32(%%ebx, %2) \n\t"
			:: "r" (srcBlock), "r" (srcStride), "r" (dstBlock), "r" (dstStride),
			"m" (x)
			: "%eax", "%ebx"
			);

#elif defined(HAVE_3DNOW)
//FIXME check if this is faster on a 3dnow chip or if it's faster without the prefetch or ...
/*			prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
#endif

#ifdef PP_FUNNY_STRIDE
			//can we mess with an 8x16 block? if not, use a temp buffer (yes, again)
			if(x+7 >= width)
			{
				int i;
				dstBlockPtrBackup= dstBlock;
				srcBlockPtrBackup= srcBlock;

				for(i=0;i<BLOCK_SIZE*2; i++)
				{
					memcpy(tempSrcBlock+i*srcStride, srcBlock+i*srcStride, width-x);
					memcpy(tempDstBlock+i*dstStride, dstBlock+i*dstStride, width-x);
				}

				dstBlock= tempDstBlock;
				srcBlock= tempSrcBlock;
			}
#endif

			/* copy source lines 8..15 (0..7 were done by the previous row pass) */
			blockCopy(dstBlock + dstStride*8, dstStride,
				srcBlock + srcStride*8, srcStride, 8, mode & LEVEL_FIX);

			if(mode & LINEAR_IPOL_DEINT_FILTER)
				deInterlaceInterpolateLinear(dstBlock, dstStride);
			else if(mode & LINEAR_BLEND_DEINT_FILTER)
				deInterlaceBlendLinear(dstBlock, dstStride);
			else if(mode & MEDIAN_DEINT_FILTER)
				deInterlaceMedian(dstBlock, dstStride);
			else if(mode & CUBIC_IPOL_DEINT_FILTER)
				deInterlaceInterpolateCubic(dstBlock, dstStride);
/*			else if(mode & CUBIC_BLEND_DEINT_FILTER)
				deInterlaceBlendCubic(dstBlock, dstStride);
*/

			/* only deblock if we have 2 blocks */
			if(y + 8 < height)
			{
#ifdef MORE_TIMING
				T1= rdtsc();
				memcpyTime+= T1-T0;
				T0=T1;
#endif
				/* vertical deblocking (filters across the horizontal
				   block edge); DC blocks get the strong low-pass,
				   others the default filter */
				if(mode & V_RK1_FILTER)
					vertRK1Filter(dstBlock, stride, QP);
				else if(mode & V_X1_FILTER)
					vertX1Filter(dstBlock, stride, QP);
				else if(mode & V_DEBLOCK)
				{
					if( isVertDC(dstBlock, stride))
					{
						if(isVertMinMaxOk(dstBlock, stride, QP))
							doVertLowPass(dstBlock, stride, QP);
					}
					else
						doVertDefFilter(dstBlock, stride, QP);
				}
#ifdef MORE_TIMING
				T1= rdtsc();
				vertTime+= T1-T0;
				T0=T1;
#endif
			}

#ifdef HAVE_MMX
			/* transpose the block so the horizontal filters can reuse
			   the vertical MMX filter code (stride 16 in the temp) */
			transpose1(tempBlock1, tempBlock2, dstBlock, dstStride);
#endif
			/* check if we have a previous block to deblock it with dstBlock */
			if(x - 8 >= 0)
			{
#ifdef MORE_TIMING
				T0= rdtsc();
#endif
#ifdef HAVE_MMX
				/* horizontal deblocking on the transposed previous block */
				if(mode & H_RK1_FILTER)
					vertRK1Filter(tempBlock1, 16, QP);
				else if(mode & H_X1_FILTER)
					vertX1Filter(tempBlock1, 16, QP);
				else if(mode & H_DEBLOCK)
				{
					if( isVertDC(tempBlock1, 16))
					{
						if(isVertMinMaxOk(tempBlock1, 16, QP))
							doVertLowPass(tempBlock1, 16, QP);
					}
					else
						doVertDefFilter(tempBlock1, 16, QP);
				}
				/* transpose the filtered edge back into dst */
				transpose2(dstBlock-4, dstStride, tempBlock1 + 4*16);

#else
				/* scalar horizontal deblocking, centered on the edge
				   4 pixels to the left */
				if(mode & H_X1_FILTER)
					horizX1Filter(dstBlock-4, stride, QP);
				else if(mode & H_DEBLOCK)
				{
					if( isHorizDC(dstBlock-4, stride))
					{
						if(isHorizMinMaxOk(dstBlock-4, stride, QP))
							doHorizLowPass(dstBlock-4, stride, QP);
					}
					else
						doHorizDefFilter(dstBlock-4, stride, QP);
				}
#endif
#ifdef MORE_TIMING
				T1= rdtsc();
				horizTime+= T1-T0;
				T0=T1;
#endif
				if(mode & DERING)
				{
				//FIXME filter first line
					if(y>0) dering(dstBlock - stride - 8, stride, QP);
				}

				if(mode & TEMP_NOISE_FILTER)
				{
					tempNoiseReducer(dstBlock-8, stride,
						tempBlured[isColor] + y*dstStride + x,
						tempBluredPast[isColor] + (y>>3)*256 + (x>>3),
						ppMode->maxTmpNoise);
				}
			}

#ifdef PP_FUNNY_STRIDE
			/* did we use a tmp-block buffer? copy the result back */
			if(x+7 >= width)
			{
				int i;
				dstBlock= dstBlockPtrBackup;
				srcBlock= srcBlockPtrBackup;

				for(i=0;i<BLOCK_SIZE*2; i++)
				{
					memcpy(dstBlock+i*dstStride, tempDstBlock+i*dstStride, width-x);
				}
			}
#endif

			dstBlock+=8;
			srcBlock+=8;

#ifdef HAVE_MMX
			/* swap the ping-pong transpose buffers: this block becomes
			   the "previous block" of the next iteration */
			tmpXchg= tempBlock1;
			tempBlock1= tempBlock2;
			tempBlock2 = tmpXchg;
#endif
		}

		/* handle the rightmost block column: the in-loop dering/denoise
		   only runs for blocks that have a left neighbour (x-8 >= 0) */
		if(mode & DERING)
		{
			if(y > 0) dering(dstBlock - dstStride - 8, dstStride, QP);
		}

		if((mode & TEMP_NOISE_FILTER))
		{
			tempNoiseReducer(dstBlock-8, dstStride,
				tempBlured[isColor] + y*dstStride + x,
				tempBluredPast[isColor] + (y>>3)*256 + (x>>3),
				ppMode->maxTmpNoise);
		}

		/* did we use a tmp buffer for the last lines? */
		if(y+15 >= height)
		{
			uint8_t *dstBlock= &(dst[y*dstStride]);
			memcpy(dstBlock, tempDst + dstStride, dstStride*(height-y) );
		}
/*
		for(x=0; x<width; x+=32)
		{
			int i;
			i+= + dstBlock[x + 7*dstStride] + dstBlock[x + 8*dstStride]
			+ dstBlock[x + 9*dstStride] + dstBlock[x +10*dstStride]
			+ dstBlock[x +11*dstStride] + dstBlock[x +12*dstStride]
			+ dstBlock[x +13*dstStride] + dstBlock[x +14*dstStride]
			+ dstBlock[x +15*dstStride];
		}
*/	}
#ifdef HAVE_3DNOW
	/* leave MMX/3dnow state so the FPU is usable again */
	asm volatile("femms");
#elif defined (HAVE_MMX)
	asm volatile("emms");
#endif

#ifdef TIMING
	// FIXME diff is mostly the time spent for rdtsc (should subtract that but ...)
	sumTime= rdtsc() - sumTime;
	if(!isColor)
		printf("cpy:%4dk, vert:%4dk, horiz:%4dk, sum:%4dk, diff:%4dk, color: %d/%d \r",
			(int)(memcpyTime/1000), (int)(vertTime/1000), (int)(horizTime/1000),
			(int)(sumTime/1000), (int)((sumTime-memcpyTime-vertTime-horizTime)/1000)
			, black, white);
#endif
#ifdef DEBUG_BRIGHTNESS
	/* draw the luma histogram and the black/white level markers into dst */
	if(!isColor)
	{
		int max=1;
		int i;
		for(i=0; i<256; i++)
			if(yHistogram[i] > max) max=yHistogram[i];

		for(i=1; i<256; i++)
		{
			int x;
			int start=yHistogram[i-1]/(max/256+1);
			int end=yHistogram[i]/(max/256+1);
			int inc= end > start ? 1 : -1;
			for(x=start; x!=end+inc; x+=inc)
				dst[ i*dstStride + x]+=128;
		}

		for(i=0; i<100; i+=2)
		{
			dst[ (white)*dstStride + i]+=128;
			dst[ (black)*dstStride + i]+=128;
		}
	}
#endif
}