You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3603 lines
105KB

  1. /*
  2. Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. /*
  16. C MMX MMX2 3DNow
  17. isVertDC Ec Ec
  18. isVertMinMaxOk Ec Ec
  19. doVertLowPass E e e
  20. doVertDefFilter Ec Ec e e
  21. isHorizDC Ec Ec
  22. isHorizMinMaxOk a E
  23. doHorizLowPass E e e
  24. doHorizDefFilter Ec Ec e e
  25. deRing E e e*
  26. Vertical RKAlgo1 E a a
  27. Horizontal RKAlgo1 a a
  28. Vertical X1# a E E
  29. Horizontal X1# a E E
  30. LinIpolDeinterlace e E E*
  31. CubicIpolDeinterlace a e e*
  32. LinBlendDeinterlace e E E*
  33. MedianDeinterlace# Ec Ec
  34. TempDeNoiser# E e e
  35. * i don't have a 3DNow CPU -> it's untested, but no one said it doesn't work so it seems to work
  36. # more or less self-invented filters, so the exactness isn't very meaningful
  37. E = Exact implementation
  38. e = almost exact implementation (slightly different rounding,...)
  39. a = alternative / approximate impl
  40. c = checked against the other implementations (-vo md5)
  41. */
  42. /*
  43. TODO:
  44. verify that everything works as it should (how?)
  45. reduce the time wasted on the mem transfer
  46. implement everything in C at least (done at the moment but ...)
  47. unroll stuff if instructions depend too much on the prior one
  48. we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
  49. move YScale thing to the end instead of fixing QP
  50. write a faster and higher quality deblocking filter :)
  51. make the mainloop more flexible (variable number of blocks at once
  52. (the if/else stuff per block is slowing things down)
  53. compare the quality & speed of all filters
  54. split this huge file
  55. border remover
  56. optimize c versions
  57. try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
  58. smart blur
  59. commandline option for the deblock thresholds
  60. ...
  61. */
  62. //Changelog: use the CVS log
  63. #include "../config.h"
  64. #include <inttypes.h>
  65. #include <stdio.h>
  66. #include <stdlib.h>
  67. #include <string.h>
  68. #ifdef HAVE_MALLOC_H
  69. #include <malloc.h>
  70. #endif
  71. //#undef HAVE_MMX2
  72. //#define HAVE_3DNOW
  73. //#undef HAVE_MMX
  74. //#define DEBUG_BRIGHTNESS
  75. #include "postprocess.h"
/* Scalar helper macros. NOTE: classic function-like macros -- each argument
 * may be evaluated more than once, so never pass expressions with side
 * effects (e.g. MIN(i++, j)). */
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define SIGN(a) ((a) > 0 ? 1 : -1) /* NOTE: SIGN(0) == -1, not 0 */

/* PAVGB(a,b): asm text for "b = rounded average of a and b" on packed
 * unsigned bytes. MMX2 (pavgb) and 3DNow (pavgusb) provide different
 * opcodes for the same operation; plain MMX has no byte average, so PAVGB
 * stays undefined there and the code paths using it are compiled out. */
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

/* PMINUB: asm text for per-byte unsigned minimum, result in the second
 * macro argument. The plain-MMX fallback needs scratch register t:
 *   t = a -sat b = max(a-b,0);  a -= t  ==>  a = min(a,b)
 * (note the parameter names are swapped in the fallback, but the result
 * still lands in the second argument). */
#ifdef HAVE_MMX2
#define PMINUB(a,b,t) "pminub " #a ", " #b " \n\t"
#elif defined (HAVE_MMX)
#define PMINUB(b,a,t) \
	"movq " #a ", " #t " \n\t"\
	"psubusb " #b ", " #t " \n\t"\
	"psubb " #t ", " #a " \n\t"
#endif

/* PMAXUB: asm text for per-byte unsigned maximum, result in the second
 * macro argument. Plain-MMX fallback:
 *   b = b -sat a = max(b-a,0);  b += a  ==>  b = max(a,b)  (no wrap, <=255) */
#ifdef HAVE_MMX2
#define PMAXUB(a,b) "pmaxub " #a ", " #b " \n\t"
#elif defined (HAVE_MMX)
#define PMAXUB(a,b) \
	"psubusb " #a ", " #b " \n\t"\
	"paddb " #a ", " #b " \n\t"
#endif
/* NOTE(review): the users of these two sizes are outside this chunk
 * (mode-string parsing); confirm against the rest of the file. */
#define GET_MODE_BUFFER_SIZE 500
#define OPTIONS_ARRAY_SIZE 10

#ifdef HAVE_MMX
/* 64-bit constants used as memory operands by the MMX/MMX2/3DNow inline
 * assembly below; all 8-byte aligned for movq access.
 * Naming scheme:
 *   wXXXX      - the 16-bit word 0xXXXX replicated four times
 *   bmDDDDDDDD - byte mask, one digit per byte: 1 -> 0xFF, 0 -> 0x00
 *   bXX        - the byte 0xXX replicated eight times
 * packedYOffset/packedYScale are rewritten at runtime by the auto-level
 * code (outside this chunk), hence volatile. */
static volatile uint64_t __attribute__((aligned(8))) packedYOffset=	0x0000000000000000LL;
static volatile uint64_t __attribute__((aligned(8))) packedYScale=	0x0100010001000100LL;
static uint64_t __attribute__((aligned(8))) w05=		0x0005000500050005LL;
static uint64_t __attribute__((aligned(8))) w20=		0x0020002000200020LL;
static uint64_t __attribute__((aligned(8))) w1400=		0x1400140014001400LL;
static uint64_t __attribute__((aligned(8))) bm00000001=	0x00000000000000FFLL;
static uint64_t __attribute__((aligned(8))) bm00010000=	0x000000FF00000000LL;
static uint64_t __attribute__((aligned(8))) bm00001000=	0x00000000FF000000LL;
static uint64_t __attribute__((aligned(8))) bm10000000=	0xFF00000000000000LL;
static uint64_t __attribute__((aligned(8))) bm10000001=	0xFF000000000000FFLL;
static uint64_t __attribute__((aligned(8))) bm11000011=	0xFFFF00000000FFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000011=	0x000000000000FFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111110=	0xFFFFFFFFFFFFFF00LL;
static uint64_t __attribute__((aligned(8))) bm11000000=	0xFFFF000000000000LL;
static uint64_t __attribute__((aligned(8))) bm00011000=	0x000000FFFF000000LL;
static uint64_t __attribute__((aligned(8))) bm00110011=	0x0000FFFF0000FFFFLL;
static uint64_t __attribute__((aligned(8))) bm11001100=	0xFFFF0000FFFF0000LL;
static uint64_t __attribute__((aligned(8))) b00= 		0x0000000000000000LL;
static uint64_t __attribute__((aligned(8))) b01= 		0x0101010101010101LL;
static uint64_t __attribute__((aligned(8))) b02= 		0x0202020202020202LL;
static uint64_t __attribute__((aligned(8))) b0F= 		0x0F0F0F0F0F0F0F0FLL;
static uint64_t __attribute__((aligned(8))) b04= 		0x0404040404040404LL;
static uint64_t __attribute__((aligned(8))) b08= 		0x0808080808080808LL;
static uint64_t __attribute__((aligned(8))) bFF= 		0xFFFFFFFFFFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) b20= 		0x2020202020202020LL;
static uint64_t __attribute__((aligned(8))) b80= 		0x8080808080808080LL;
static uint64_t __attribute__((aligned(8))) b7E= 		0x7E7E7E7E7E7E7E7ELL;
static uint64_t __attribute__((aligned(8))) b7C= 		0x7C7C7C7C7C7C7C7CLL;
static uint64_t __attribute__((aligned(8))) b3F= 		0x3F3F3F3F3F3F3F3FLL;
/* scratch storage for the asm filters */
static uint64_t __attribute__((aligned(8))) temp0=0;
static uint64_t __attribute__((aligned(8))) temp1=0;
static uint64_t __attribute__((aligned(8))) temp2=0;
static uint64_t __attribute__((aligned(8))) temp3=0;
static uint64_t __attribute__((aligned(8))) temp4=0;
static uint64_t __attribute__((aligned(8))) temp5=0;
/* current quantizer QP replicated into all 8 bytes ("QP,...,QP" in the asm
 * comments); pQPb2 presumably holds 2*QP -- written by caller code outside
 * this chunk, TODO confirm. */
static uint64_t __attribute__((aligned(8))) pQPb=0;
static uint64_t __attribute__((aligned(8))) pQPb2=0;
static uint8_t __attribute__((aligned(8))) tempBlocks[8*16*2]; //used for the horizontal code
static uint32_t __attribute__((aligned(4))) maxTmpNoise[4];
#else
static uint64_t packedYOffset=	0x0000000000000000LL;
static uint64_t packedYScale=	0x0100010001000100LL;
static uint8_t tempBlocks[8*16*2]; //used for the horizontal code
#endif

/* Minimum number of "equal" vertical/horizontal neighbor pairs (out of
 * 7 lines x 8 pixels = 56, minus a margin of 16) for isVertDC/isHorizDC
 * to classify a block as flat. */
int hFlatnessThreshold= 56 - 16;
int vFlatnessThreshold= 56 - 16;

// fraction of pixels that may clip to black/white when the auto-level
// (brightness correction) filter stretches the luma range
double maxClippedThreshold= 0.01;

/* luma clamp range used by the auto-level code; 16..234 is close to the
 * ITU-R BT.601 nominal range */
int maxAllowedY=234;
int minAllowedY=16;
/* Table of all available postprocessing filters, terminated by a NULL
 * entry.  Fields: short option name, long option name, then four numeric
 * fields defined by struct PPFilter in postprocess.h.
 * NOTE(review): the numeric fields look like an "accepts options" flag,
 * min/max quality level, and the filter's mode bit -- confirm against the
 * struct declaration in the header. */
static struct PPFilter filters[]=
{
	{"hb", "hdeblock",		1, 1, 3, H_DEBLOCK},
	{"vb", "vdeblock",		1, 2, 4, V_DEBLOCK},
	{"vr", "rkvdeblock",		1, 2, 4, H_RK1_FILTER},
	{"h1", "x1hdeblock",		1, 1, 3, H_X1_FILTER},
	{"v1", "x1vdeblock",		1, 2, 4, V_X1_FILTER},
	{"dr", "dering",		1, 5, 6, DERING},
	{"al", "autolevels",		0, 1, 2, LEVEL_FIX},
	{"lb", "linblenddeint",		0, 1, 6, LINEAR_BLEND_DEINT_FILTER},
	{"li", "linipoldeint",		0, 1, 6, LINEAR_IPOL_DEINT_FILTER},
	{"ci", "cubicipoldeint",	0, 1, 6, CUBIC_IPOL_DEINT_FILTER},
	{"md", "mediandeint",		0, 1, 6, MEDIAN_DEINT_FILTER},
	{"tn", "tmpnoise",		1, 7, 8, TEMP_NOISE_FILTER},
	{NULL, NULL,0,0,0,0} //End Marker
};

/* Alias table for pp mode strings: (alias, replacement) pairs, scanned
 * until the NULL end marker. */
static char *replaceTable[]=
{
	"default",	"hdeblock:a,vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
	"de",		"hdeblock:a,vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
	"fast",		"x1hdeblock:a,x1vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
	"fa",		"x1hdeblock:a,x1vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
	NULL //End Marker
};
  177. #ifdef HAVE_MMX
  178. static inline void unusedVariableWarningFixer()
  179. {
  180. if(
  181. packedYOffset + packedYScale + w05 + w20 + w1400 + bm00000001 + bm00010000
  182. + bm00001000 + bm10000000 + bm10000001 + bm11000011 + bm00000011 + bm11111110
  183. + bm11000000 + bm00011000 + bm00110011 + bm11001100 + b00 + b01 + b02 + b0F
  184. + bFF + b20 + b04+ b08 + pQPb2 + b80 + b7E + b7C + b3F + temp0 + temp1 + temp2 + temp3 + temp4
  185. + temp5 + pQPb== 0) b00=0;
  186. }
  187. #endif
#ifdef TIMING
/**
 * Read the x86 time stamp counter (RDTSC) for ad-hoc profiling.
 * NOTE(review): the "=A" constraint binds the edx:eax register pair, which
 * is only correct on 32-bit x86 -- this would silently truncate on x86-64.
 */
static inline long long rdtsc()
{
	long long l;
	asm volatile(	"rdtsc\n\t"
		: "=A" (l)
	);
//	printf("%d\n", int(l/1000));
	return l;
}
#endif
#ifdef HAVE_MMX2
/* Thin wrappers around the SSE prefetch instructions.  The pointer is a
 * cache hint only: nothing is read or written, so a bad address is
 * harmless.  nta = non-temporal (minimize cache pollution); t0/t1/t2 pull
 * the line into progressively more distant cache levels. */
static inline void prefetchnta(void *p)
{
	asm volatile(	"prefetchnta (%0)\n\t"
		: : "r" (p)
	);
}

static inline void prefetcht0(void *p)
{
	asm volatile(	"prefetcht0 (%0)\n\t"
		: : "r" (p)
	);
}

static inline void prefetcht1(void *p)
{
	asm volatile(	"prefetcht1 (%0)\n\t"
		: : "r" (p)
	);
}

static inline void prefetcht2(void *p)
{
	asm volatile(	"prefetcht2 (%0)\n\t"
		: : "r" (p)
	);
}
#endif
//FIXME? |255-0| = 1 (shouldnt be a problem ...)
/**
 * Check if the middle 8x8 Block in the given 8x16 block is flat.
 * Counts, over the 7 vertically adjacent line pairs of the 8x8 block, the
 * bytes whose difference is within +-1; the block counts as flat ("DC")
 * when that count exceeds vFlatnessThreshold (out of 7*8 = 56 pairs).
 * Returns 1 if flat, 0 otherwise.
 */
static inline int isVertDC(uint8_t src[], int stride){
	int numEq= 0;
#ifndef HAVE_MMX
	int y;
#endif
	src+= stride*4; // src points to begin of the 8x8 Block
#ifdef HAVE_MMX
	asm volatile(
		"leal (%1, %2), %%eax				\n\t"
		"leal (%%eax, %2, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%1	eax	eax+%2	eax+2%2	%1+4%2	ebx	ebx+%2	ebx+2%2	%1+8%2	ebx+4%2
		"movq b7E, %%mm7				\n\t" // mm7 = 0x7E in every byte (bias)
		"movq b7C, %%mm6				\n\t" // mm6 = 0x7C in every byte (threshold)
		/* For each pair of adjacent lines: diff = a-b (wrapping byte sub);
		   (diff + 0x7E) > 0x7C as a SIGNED compare holds exactly for diff
		   in {-1,0,1}, producing 0xFF (-1) per "equal" byte.  Those -1s
		   are accumulated bytewise in mm0. */
		"movq (%1), %%mm0				\n\t"
		"movq (%%eax), %%mm1				\n\t"
		"psubb %%mm1, %%mm0				\n\t" // mm0 = difference
		"paddb %%mm7, %%mm0				\n\t"
		"pcmpgtb %%mm6, %%mm0				\n\t"

		"movq (%%eax,%2), %%mm2				\n\t"
		"psubb %%mm2, %%mm1				\n\t"
		"paddb %%mm7, %%mm1				\n\t"
		"pcmpgtb %%mm6, %%mm1				\n\t"
		"paddb %%mm1, %%mm0				\n\t"

		"movq (%%eax, %2, 2), %%mm1			\n\t"
		"psubb %%mm1, %%mm2				\n\t"
		"paddb %%mm7, %%mm2				\n\t"
		"pcmpgtb %%mm6, %%mm2				\n\t"
		"paddb %%mm2, %%mm0				\n\t"

		"movq (%1, %2, 4), %%mm2			\n\t"
		"psubb %%mm2, %%mm1				\n\t"
		"paddb %%mm7, %%mm1				\n\t"
		"pcmpgtb %%mm6, %%mm1				\n\t"
		"paddb %%mm1, %%mm0				\n\t"

		"movq (%%ebx), %%mm1				\n\t"
		"psubb %%mm1, %%mm2				\n\t"
		"paddb %%mm7, %%mm2				\n\t"
		"pcmpgtb %%mm6, %%mm2				\n\t"
		"paddb %%mm2, %%mm0				\n\t"

		"movq (%%ebx, %2), %%mm2			\n\t"
		"psubb %%mm2, %%mm1				\n\t"
		"paddb %%mm7, %%mm1				\n\t"
		"pcmpgtb %%mm6, %%mm1				\n\t"
		"paddb %%mm1, %%mm0				\n\t"

		"movq (%%ebx, %2, 2), %%mm1			\n\t"
		"psubb %%mm1, %%mm2				\n\t"
		"paddb %%mm7, %%mm2				\n\t"
		"pcmpgtb %%mm6, %%mm2				\n\t"
		"paddb %%mm2, %%mm0				\n\t"
		" \n\t"
		/* horizontal sum of the 8 (negative) byte counters into the low
		   byte of mm0 */
		"movq %%mm0, %%mm1				\n\t"
		"psrlw $8, %%mm0				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
#ifdef HAVE_MMX2
		"pshufw $0xF9, %%mm0, %%mm1			\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"pshufw $0xFE, %%mm0, %%mm1			\n\t"
#else
		"movq %%mm0, %%mm1				\n\t"
		"psrlq $16, %%mm0				\n\t"
		"paddb %%mm1, %%mm0				\n\t"
		"movq %%mm0, %%mm1				\n\t"
		"psrlq $32, %%mm0				\n\t"
#endif
		"paddb %%mm1, %%mm0				\n\t"
		"movd %%mm0, %0					\n\t"
		: "=r" (numEq)
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
		);
	numEq= (256 - numEq) &0xFF; // counter held a sum of -1s; map back to a positive count

#else
	for(y=0; y<BLOCK_SIZE-1; y++)
	{
		/* ((diff+1) & 0xFFFF) < 3  <=>  diff in {-1,0,1} for 8-bit pixels */
		if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
		if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
		src+= stride;
	}
#endif
/*	if(abs(numEq - asmEq) > 0)
	{
		printf("\nasm:%d c:%d\n", asmEq, numEq);
		for(int y=0; y<8; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", temp[x + y*stride]);
			}
			printf("\n");
		}
	}
*/
//	for(int i=0; i<numEq/8; i++) src[i]=255;
	return (numEq > vFlatnessThreshold) ? 1 : 0;
}
/**
 * Check that the vertical dynamic range of the block is small enough for
 * deblocking: returns nonzero iff |top line - bottom line| <= 2*QP for
 * every pixel column, where top/bottom are the lines just inside the
 * 8-line region being filtered (src + 4*stride and src + 11*stride of the
 * original 8x16 area).
 */
static inline int isVertMinMaxOk(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
	int isOk;
	src+= stride*3;
	asm volatile(
//		"int $3 \n\t"
		"movq (%1, %2), %%mm0				\n\t"
		"movq (%1, %2, 8), %%mm1			\n\t"
		"movq %%mm0, %%mm2				\n\t"
		"psubusb %%mm1, %%mm0				\n\t"
		"psubusb %%mm2, %%mm1				\n\t"
		"por %%mm1, %%mm0				\n\t" // ABS Diff

		"movq pQPb, %%mm7				\n\t" // QP,..., QP
		"paddusb %%mm7, %%mm7				\n\t" // 2QP ... 2QP
		"psubusb %%mm7, %%mm0				\n\t" // Diff <= 2QP -> 0
		/* collapse the 8 byte results: the final movd is all-ones only
		   when both dwords of mm0 were all-zero, i.e. every diff passed */
		"pcmpeqd b00, %%mm0				\n\t"
		"psrlq $16, %%mm0				\n\t"
		"pcmpeqd bFF, %%mm0				\n\t"
//		"movd %%mm0, (%1, %2, 4)\n\t"
		"movd %%mm0, %0					\n\t"
		: "=r" (isOk)
		: "r" (src), "r" (stride)
		);
	return isOk;
#else
	int isOk2= 1;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=0;
	}
/*	if(isOk && !isOk2 || !isOk && isOk2)
	{
		printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
		for(int y=0; y<9; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", src[x + y*stride]);
			}
			printf("\n");
		}
	} */
	return isOk2;
#endif
}
/**
 * Do a vertical low pass filter on the 8x16 block (only write to the 8x8
 * block in the middle) using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16.
 * The outermost source lines are replaced by their inner neighbors when
 * they differ by more than QP (deblocking anchor).  The MMX2/3DNow path
 * approximates the taps with a tree of rounded byte averages (PAVGB); the
 * C path is the exact integer filter.
 */
static inline void doVertLowPass(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	src+= stride*3;
	asm volatile(	//"movv %0 %1 %2\n\t"
		"movq pQPb, %%mm0				\n\t"  // QP,..., QP

		/* clamp line 0 to line 1 if they differ by more than QP */
		"movq (%0), %%mm6				\n\t"
		"movq (%0, %1), %%mm5				\n\t"
		"movq %%mm5, %%mm1				\n\t"
		"movq %%mm6, %%mm2				\n\t"
		"psubusb %%mm6, %%mm5				\n\t"
		"psubusb %%mm1, %%mm2				\n\t"
		"por %%mm5, %%mm2				\n\t" // ABS Diff of lines
		"psubusb %%mm0, %%mm2				\n\t" // diff <= QP -> 0
		"pcmpeqb b00, %%mm2				\n\t" // diff <= QP -> FF
		"pand %%mm2, %%mm6				\n\t"
		"pandn %%mm1, %%mm2				\n\t"
		"por %%mm2, %%mm6				\n\t"// First Line to Filter

		"movq (%0, %1, 8), %%mm5			\n\t"
		"leal (%0, %1, 4), %%eax			\n\t"
		"leal (%0, %1, 8), %%ebx			\n\t"
		"subl %1, %%ebx					\n\t"
		"addl %1, %0					\n\t" // %0 points to line 1 not 0

		/* same clamp for the last line against its inner neighbor */
		"movq (%0, %1, 8), %%mm7			\n\t"
		"movq %%mm5, %%mm1				\n\t"
		"movq %%mm7, %%mm2				\n\t"
		"psubusb %%mm7, %%mm5				\n\t"
		"psubusb %%mm1, %%mm2				\n\t"
		"por %%mm5, %%mm2				\n\t" // ABS Diff of lines
		"psubusb %%mm0, %%mm2				\n\t" // diff <= QP -> 0
		"pcmpeqb b00, %%mm2				\n\t" // diff <= QP -> FF
		"pand %%mm2, %%mm7				\n\t"
		"pandn %%mm1, %%mm2				\n\t"
		"por %%mm2, %%mm7				\n\t" // First Line to Filter

//	1	2	3	4	5	6	7	8
//	%0	%0+%1	%0+2%1	eax	%0+4%1	eax+2%1	ebx	eax+4%1
// 6 4 2 2 1 1
// 6 4 4 2
// 6 8 2
		/* the digit strings below track which source lines (with weights)
		   have been folded into the register so far; "/n" is the divisor */
		"movq (%0, %1), %%mm0				\n\t" //  1
		"movq %%mm0, %%mm1				\n\t" //  1
		PAVGB(%%mm6, %%mm0)				      //1 1	/2
		PAVGB(%%mm6, %%mm0)				      //3 1	/4

		"movq (%0, %1, 4), %%mm2			\n\t" //     1
		"movq %%mm2, %%mm5				\n\t" //     1
		PAVGB((%%eax), %%mm2)				      //    11	/2
		PAVGB((%0, %1, 2), %%mm2)			      //   211	/4
		"movq %%mm2, %%mm3				\n\t" //   211	/4
		"movq (%0), %%mm4				\n\t" // 1
		PAVGB(%%mm4, %%mm3)				      // 4 211	/8
		PAVGB(%%mm0, %%mm3)				      //642211	/16
		"movq %%mm3, (%0)				\n\t" // X
		// mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
		"movq %%mm1, %%mm0				\n\t" //  1
		PAVGB(%%mm6, %%mm0)				      //1 1	/2
		"movq %%mm4, %%mm3				\n\t" // 1
		PAVGB((%0,%1,2), %%mm3)				      // 1 1	/2
		PAVGB((%%eax,%1,2), %%mm5)			      //     11	/2
		PAVGB((%%eax), %%mm5)				      //    211	/4
		PAVGB(%%mm5, %%mm3)				      // 2 2211	/8
		PAVGB(%%mm0, %%mm3)				      //4242211	/16
		"movq %%mm3, (%0,%1)				\n\t" //  X
		// mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
		PAVGB(%%mm4, %%mm6)				      //11	/2
		"movq (%%ebx), %%mm0				\n\t" //       1
		PAVGB((%%eax, %1, 2), %%mm0)			      //      11/2
		"movq %%mm0, %%mm3				\n\t" //      11/2
		PAVGB(%%mm1, %%mm0)				      //  2   11/4
		PAVGB(%%mm6, %%mm0)				      //222   11/8
		PAVGB(%%mm2, %%mm0)				      //22242211/16
		"movq (%0, %1, 2), %%mm2			\n\t" //   1
		"movq %%mm0, (%0, %1, 2)			\n\t" //   X
		// mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
		"movq (%%eax, %1, 4), %%mm0			\n\t" //        1
		PAVGB((%%ebx), %%mm0)				      //       11	/2
		PAVGB(%%mm0, %%mm6)				      //11     11	/4
		PAVGB(%%mm1, %%mm4)				      // 11		/2
		PAVGB(%%mm2, %%mm1)				      //  11		/2
		PAVGB(%%mm1, %%mm6)				      //1122   11	/8
		PAVGB(%%mm5, %%mm6)				      //112242211	/16
		"movq (%%eax), %%mm5				\n\t" //    1
		"movq %%mm6, (%%eax)				\n\t" //    X
		// mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
		"movq (%%eax, %1, 4), %%mm6			\n\t" //        1
		PAVGB(%%mm7, %%mm6)				      //        11	/2
		PAVGB(%%mm4, %%mm6)				      // 11     11	/4
		PAVGB(%%mm3, %%mm6)				      // 11   2211	/8
		PAVGB(%%mm5, %%mm2)				      //   11		/2
		"movq (%0, %1, 4), %%mm4			\n\t" //     1
		PAVGB(%%mm4, %%mm2)				      //   112		/4
		PAVGB(%%mm2, %%mm6)				      // 112242211	/16
		"movq %%mm6, (%0, %1, 4)			\n\t" //     X
		// mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
		PAVGB(%%mm7, %%mm1)				      //  11     2	/4
		PAVGB(%%mm4, %%mm5)				      //    11		/2
		PAVGB(%%mm5, %%mm0)				      //    11 11	/4
		"movq (%%eax, %1, 2), %%mm6			\n\t" //      1
		PAVGB(%%mm6, %%mm1)				      //  11  4  2	/8
		PAVGB(%%mm0, %%mm1)				      //  11224222	/16
		"movq %%mm1, (%%eax, %1, 2)			\n\t" //      X
		// mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
		PAVGB((%%ebx), %%mm2)				      //   112 4	/8
		"movq (%%eax, %1, 4), %%mm0			\n\t" //        1
		PAVGB(%%mm0, %%mm6)				      //      1 1	/2
		PAVGB(%%mm7, %%mm6)				      //      1 12	/4
		PAVGB(%%mm2, %%mm6)				      //   1122424	/4
		"movq %%mm6, (%%ebx)				\n\t" //       X
		// mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
		PAVGB(%%mm7, %%mm5)				      //    11   2	/4
		PAVGB(%%mm7, %%mm5)				      //    11   6	/8
		PAVGB(%%mm3, %%mm0)				      //      112	/4
		PAVGB(%%mm0, %%mm5)				      //    112246	/16
		"movq %%mm5, (%%eax, %1, 4)			\n\t" //        X
		"subl %1, %0					\n\t"

		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		/* anchor lines: use the outer neighbor only if it is within QP
		   of the block edge, otherwise duplicate the edge line */
		const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
		const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];

		/* sums[i] = sum of two adjacent lines; the expressions below
		   combine them into the exact (1,1,2,2,4,2,2,1,1)/16 tap filter
		   with round-to-nearest (+8) */
		int sums[9];
		sums[0] = first + src[l1];
		sums[1] = src[l1] + src[l2];
		sums[2] = src[l2] + src[l3];
		sums[3] = src[l3] + src[l4];
		sums[4] = src[l4] + src[l5];
		sums[5] = src[l5] + src[l6];
		sums[6] = src[l6] + src[l7];
		sums[7] = src[l7] + src[l8];
		sums[8] = src[l8] + last;

		src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
		src[l2]= ((src[l2]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
		src[l3]= ((src[l3]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
		src[l4]= ((src[l4]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
		src[l5]= ((src[l5]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
		src[l6]= ((src[l6]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
		src[l7]= (((last + src[l7])<<2) + ((src[l8] + sums[5])<<1) + sums[3] + 8)>>4;
		src[l8]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;

		src++;
	}
#endif
}
/**
 * Experimental implementation of the filter (Algorithm 1) described in a
 * paper from Ramkishor & Karandikar.
 * If |l5-l4| is below ~1.25*QP, the step between lines 4 and 5 is smoothed
 * by moving l4/l5 half way and l3/l6 an eighth of the way toward each other.
 * Values are correctly clipped (MMX2); values wrap around (C version).
 * Conclusion: it's fast, but introduces ugly horizontal patterns if there
 * is a continuous gradient.
 *	0 8 16 24
 *	x = 8
 *	x/2 = 4
 *	x/8 = 1
 *	1 12 12 23
 */
static inline void vertRK1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	src+= stride*3;
// FIXME rounding
	asm volatile(
		"pxor %%mm7, %%mm7				\n\t" // 0
		"movq b80, %%mm6				\n\t" // MIN_SIGNED_BYTE
		"leal (%0, %1), %%eax				\n\t"
		"leal (%%eax, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		/* build the threshold QP + QP/4 in mm0 */
		"movq pQPb, %%mm0				\n\t" // QP,..., QP
		"movq %%mm0, %%mm1				\n\t" // QP,..., QP
		"paddusb b02, %%mm0				\n\t"
		"psrlw $2, %%mm0				\n\t"
		"pand b3F, %%mm0				\n\t" // QP/4,..., QP/4
		"paddusb %%mm1, %%mm0				\n\t" // QP*1.25 ...
		"movq (%0, %1, 4), %%mm2			\n\t" // line 4
		"movq (%%ebx), %%mm3				\n\t" // line 5
		"movq %%mm2, %%mm4				\n\t" // line 4
		"pcmpeqb %%mm5, %%mm5				\n\t" // -1
		"pxor %%mm2, %%mm5				\n\t" // -line 4 - 1
		PAVGB(%%mm3, %%mm5)
		"paddb %%mm6, %%mm5				\n\t" // (l5-l4)/2
		"psubusb %%mm3, %%mm4				\n\t"
		"psubusb %%mm2, %%mm3				\n\t"
		"por %%mm3, %%mm4				\n\t" // |l4 - l5|
		"psubusb %%mm0, %%mm4				\n\t"
		"pcmpeqb %%mm7, %%mm4				\n\t" // |l4-l5| <= QP*1.25 ? FF : 0
		"pand %%mm4, %%mm5				\n\t" // d/2

//		"paddb %%mm6, %%mm2				\n\t" // line 4 + 0x80
		"paddb %%mm5, %%mm2				\n\t"
//		"psubb %%mm6, %%mm2				\n\t"
		"movq %%mm2, (%0,%1, 4)				\n\t"

		"movq (%%ebx), %%mm2				\n\t"
//		"paddb %%mm6, %%mm2				\n\t" // line 5 + 0x80
		"psubb %%mm5, %%mm2				\n\t"
//		"psubb %%mm6, %%mm2				\n\t"
		"movq %%mm2, (%%ebx)				\n\t"

		/* derive d/8 (signed, via the 0x80 bias) and apply it to l3/l6 */
		"paddb %%mm6, %%mm5				\n\t"
		"psrlw $2, %%mm5				\n\t"
		"pand b3F, %%mm5				\n\t"
		"psubb b20, %%mm5				\n\t" // (l5-l4)/8

		"movq (%%eax, %1, 2), %%mm2			\n\t"
		"paddb %%mm6, %%mm2				\n\t" // line 3 + 0x80
		"paddsb %%mm5, %%mm2				\n\t"
		"psubb %%mm6, %%mm2				\n\t"
		"movq %%mm2, (%%eax, %1, 2)			\n\t"

		"movq (%%ebx, %1), %%mm2			\n\t"
		"paddb %%mm6, %%mm2				\n\t" // line 6 + 0x80
		"psubsb %%mm5, %%mm2				\n\t"
		"psubb %%mm6, %%mm2				\n\t"
		"movq %%mm2, (%%ebx, %1)			\n\t"

		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
//	const int l7= stride + l6;
//	const int l8= stride + l7;
//	const int l9= stride + l8;
	int x;
	const int QP15= QP + (QP>>2); // QP*1.25, matching the asm threshold
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		const int v = (src[x+l5] - src[x+l4]);
		if(ABS(v) < QP15)
		{
			/* NOTE: no clipping here -- stores can wrap (see header) */
			src[x+l3] +=v>>3;
			src[x+l4] +=v>>1;
			src[x+l5] -=v>>1;
			src[x+l6] -=v>>3;
		}
	}
#endif
}
/**
 * Experimental Filter 1
 * will not damage linear gradients
 * Flat blocks should look like they were passed through the
 * (1,1,2,2,4,2,2,1,1) 9-Tap filter
 * can only smooth blocks at the expected locations (it can't smooth them
 * if they did move)
 * MMX2 version does correct clipping, C version doesn't
 */
static inline void vertX1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	src+= stride*3;
	asm volatile(
		"pxor %%mm7, %%mm7				\n\t" // 0
//		"movq b80, %%mm6				\n\t" // MIN_SIGNED_BYTE
		"leal (%0, %1), %%eax				\n\t"
		"leal (%%eax, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		"movq (%%eax, %1, 2), %%mm0			\n\t" // line 3
		"movq (%0, %1, 4), %%mm1			\n\t" // line 4
		"movq %%mm1, %%mm2				\n\t" // line 4
		"psubusb %%mm0, %%mm1				\n\t"
		"psubusb %%mm2, %%mm0				\n\t"
		"por %%mm1, %%mm0				\n\t" // |l2 - l3|
		"movq (%%ebx), %%mm3				\n\t" // line 5
		"movq (%%ebx, %1), %%mm4			\n\t" // line 6
		"movq %%mm3, %%mm5				\n\t" // line 5
		"psubusb %%mm4, %%mm3				\n\t"
		"psubusb %%mm5, %%mm4				\n\t"
		"por %%mm4, %%mm3				\n\t" // |l5 - l6|
		PAVGB(%%mm3, %%mm0)				      // (|l2 - l3| + |l5 - l6|)/2
		"movq %%mm2, %%mm1				\n\t" // line 4
		"psubusb %%mm5, %%mm2				\n\t"
		"movq %%mm2, %%mm4				\n\t"
		"pcmpeqb %%mm7, %%mm2				\n\t" // (l4 - l5) <= 0 ? -1 : 0
		"psubusb %%mm1, %%mm5				\n\t"
		"por %%mm5, %%mm4				\n\t" // |l4 - l5|
		"psubusb %%mm0, %%mm4				\n\t" //d = MAX(0, |l4-l5| - (|l2-l3| + |l5-l6|)/2)
		"movq %%mm4, %%mm3				\n\t" // d
		"psubusb pQPb, %%mm4				\n\t"
		"pcmpeqb %%mm7, %%mm4				\n\t" // d <= QP ? -1 : 0
		"psubusb b01, %%mm3				\n\t"
		"pand %%mm4, %%mm3				\n\t" // d <= QP ? d : 0

		/* distribute +-3d/8 to l4/l5, +-d/4 to l3/l6, +-d/8 to l2/l7;
		   the pxor-with-mm2 trick applies the correction with the sign
		   of (l5-l4) while psubusb/paddusb give saturated results */
		PAVGB(%%mm7, %%mm3)				      // d/2
		"movq %%mm3, %%mm1				\n\t" // d/2
		PAVGB(%%mm7, %%mm3)				      // d/4
		PAVGB(%%mm1, %%mm3)				      // 3*d/8

		"movq (%0, %1, 4), %%mm0			\n\t" // line 4
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
		"psubusb %%mm3, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%0, %1, 4)			\n\t" // line 4

		"movq (%%ebx), %%mm0				\n\t" // line 5
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
		"paddusb %%mm3, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%ebx)				\n\t" // line 5

		PAVGB(%%mm7, %%mm1)				      // d/4

		"movq (%%eax, %1, 2), %%mm0			\n\t" // line 3
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
		"psubusb %%mm1, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%eax, %1, 2)			\n\t" // line 3

		"movq (%%ebx, %1), %%mm0			\n\t" // line 6
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
		"paddusb %%mm1, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%ebx, %1)			\n\t" // line 6

		PAVGB(%%mm7, %%mm1)				      // d/8

		"movq (%%eax, %1), %%mm0			\n\t" // line 2
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
		"psubusb %%mm1, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%eax, %1)			\n\t" // line 2

		"movq (%%ebx, %1, 2), %%mm0			\n\t" // line 7
		"pxor %%mm2, %%mm0				\n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
		"paddusb %%mm1, %%mm0				\n\t"
		"pxor %%mm2, %%mm0				\n\t"
		"movq %%mm0, (%%ebx, %1, 2)			\n\t" // line 7

		:
		: "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
//	const int l8= stride + l7;
//	const int l9= stride + l8;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		int a= src[l3] - src[l4];
		int b= src[l4] - src[l5];
		int c= src[l5] - src[l6];

		/* step at l4/l5 minus the average of its neighbor steps */
		int d= ABS(b) - ((ABS(a) + ABS(c))>>1);
		d= MAX(d, 0);

		if(d < QP)
		{
			int v = d * SIGN(-b);

			/* NOTE: unlike the asm path these adds are not clipped */
			src[l2] +=v>>3;
			src[l3] +=v>>2;
			src[l4] +=(3*v)>>3;
			src[l5] -=(3*v)>>3;
			src[l6] -=v>>2;
			src[l7] -=v>>3;
		}
		src++;
	}
/*
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
	const int l9= stride + l8;
	for(int x=0; x<BLOCK_SIZE; x++)
	{
		int v2= src[l2];
		int v3= src[l3];
		int v4= src[l4];
		int v5= src[l5];
		int v6= src[l6];
		int v7= src[l7];

		if(ABS(v4-v5)<QP &&  ABS(v4-v5) - (ABS(v3-v4) + ABS(v5-v6))>0 )
		{
			src[l3] = (6*v2 + 4*v3 + 3*v4 + 2*v5 + v6         )/16;
			src[l4] = (3*v2 + 3*v3 + 4*v4 + 3*v5 + 2*v6 + v7  )/16;
			src[l5] = (1*v2 + 2*v3 + 3*v4 + 4*v5 + 3*v6 + 3*v7)/16;
			src[l6] = (       1*v3 + 2*v4 + 3*v5 + 4*v6 + 6*v7)/16;
		}
		src++;
	}
*/
#endif
}
  777. static inline void doVertDefFilter(uint8_t src[], int stride, int QP)
  778. {
  779. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  780. /*
  781. uint8_t tmp[16];
  782. const int l1= stride;
  783. const int l2= stride + l1;
  784. const int l3= stride + l2;
  785. const int l4= (int)tmp - (int)src - stride*3;
  786. const int l5= (int)tmp - (int)src - stride*3 + 8;
  787. const int l6= stride*3 + l3;
  788. const int l7= stride + l6;
  789. const int l8= stride + l7;
  790. memcpy(tmp, src+stride*7, 8);
  791. memcpy(tmp+8, src+stride*8, 8);
  792. */
  793. src+= stride*4;
  794. asm volatile(
  795. #if 0 //sligtly more accurate and slightly slower
  796. "pxor %%mm7, %%mm7 \n\t" // 0
  797. "leal (%0, %1), %%eax \n\t"
  798. "leal (%%eax, %1, 4), %%ebx \n\t"
  799. // 0 1 2 3 4 5 6 7
  800. // %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ebx+%1 ebx+2%1
  801. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1
  802. "movq (%0, %1, 2), %%mm0 \n\t" // l2
  803. "movq (%0), %%mm1 \n\t" // l0
  804. "movq %%mm0, %%mm2 \n\t" // l2
  805. PAVGB(%%mm7, %%mm0) // ~l2/2
  806. PAVGB(%%mm1, %%mm0) // ~(l2 + 2l0)/4
  807. PAVGB(%%mm2, %%mm0) // ~(5l2 + 2l0)/8
  808. "movq (%%eax), %%mm1 \n\t" // l1
  809. "movq (%%eax, %1, 2), %%mm3 \n\t" // l3
  810. "movq %%mm1, %%mm4 \n\t" // l1
  811. PAVGB(%%mm7, %%mm1) // ~l1/2
  812. PAVGB(%%mm3, %%mm1) // ~(l1 + 2l3)/4
  813. PAVGB(%%mm4, %%mm1) // ~(5l1 + 2l3)/8
  814. "movq %%mm0, %%mm4 \n\t" // ~(5l2 + 2l0)/8
  815. "psubusb %%mm1, %%mm0 \n\t"
  816. "psubusb %%mm4, %%mm1 \n\t"
  817. "por %%mm0, %%mm1 \n\t" // ~|2l0 - 5l1 + 5l2 - 2l3|/8
  818. // mm1= |lenergy|, mm2= l2, mm3= l3, mm7=0
  819. "movq (%0, %1, 4), %%mm0 \n\t" // l4
  820. "movq %%mm0, %%mm4 \n\t" // l4
  821. PAVGB(%%mm7, %%mm0) // ~l4/2
  822. PAVGB(%%mm2, %%mm0) // ~(l4 + 2l2)/4
  823. PAVGB(%%mm4, %%mm0) // ~(5l4 + 2l2)/8
  824. "movq (%%ebx), %%mm2 \n\t" // l5
  825. "movq %%mm3, %%mm5 \n\t" // l3
  826. PAVGB(%%mm7, %%mm3) // ~l3/2
  827. PAVGB(%%mm2, %%mm3) // ~(l3 + 2l5)/4
  828. PAVGB(%%mm5, %%mm3) // ~(5l3 + 2l5)/8
  829. "movq %%mm0, %%mm6 \n\t" // ~(5l4 + 2l2)/8
  830. "psubusb %%mm3, %%mm0 \n\t"
  831. "psubusb %%mm6, %%mm3 \n\t"
  832. "por %%mm0, %%mm3 \n\t" // ~|2l2 - 5l3 + 5l4 - 2l5|/8
  833. "pcmpeqb %%mm7, %%mm0 \n\t" // SIGN(2l2 - 5l3 + 5l4 - 2l5)
  834. // mm0= SIGN(menergy), mm1= |lenergy|, mm2= l5, mm3= |menergy|, mm4=l4, mm5= l3, mm7=0
  835. "movq (%%ebx, %1), %%mm6 \n\t" // l6
  836. "movq %%mm6, %%mm5 \n\t" // l6
  837. PAVGB(%%mm7, %%mm6) // ~l6/2
  838. PAVGB(%%mm4, %%mm6) // ~(l6 + 2l4)/4
  839. PAVGB(%%mm5, %%mm6) // ~(5l6 + 2l4)/8
  840. "movq (%%ebx, %1, 2), %%mm5 \n\t" // l7
  841. "movq %%mm2, %%mm4 \n\t" // l5
  842. PAVGB(%%mm7, %%mm2) // ~l5/2
  843. PAVGB(%%mm5, %%mm2) // ~(l5 + 2l7)/4
  844. PAVGB(%%mm4, %%mm2) // ~(5l5 + 2l7)/8
  845. "movq %%mm6, %%mm4 \n\t" // ~(5l6 + 2l4)/8
  846. "psubusb %%mm2, %%mm6 \n\t"
  847. "psubusb %%mm4, %%mm2 \n\t"
  848. "por %%mm6, %%mm2 \n\t" // ~|2l4 - 5l5 + 5l6 - 2l7|/8
  849. // mm0= SIGN(menergy), mm1= |lenergy|/8, mm2= |renergy|/8, mm3= |menergy|/8, mm7=0
  850. PMINUB(%%mm2, %%mm1, %%mm4) // MIN(|lenergy|,|renergy|)/8
  851. "movq pQPb, %%mm4 \n\t" // QP //FIXME QP+1 ?
  852. "paddusb b01, %%mm4 \n\t"
  853. "pcmpgtb %%mm3, %%mm4 \n\t" // |menergy|/8 < QP
  854. "psubusb %%mm1, %%mm3 \n\t" // d=|menergy|/8-MIN(|lenergy|,|renergy|)/8
  855. "pand %%mm4, %%mm3 \n\t"
  856. "movq %%mm3, %%mm1 \n\t"
  857. // "psubusb b01, %%mm3 \n\t"
  858. PAVGB(%%mm7, %%mm3)
  859. PAVGB(%%mm7, %%mm3)
  860. "paddusb %%mm1, %%mm3 \n\t"
  861. // "paddusb b01, %%mm3 \n\t"
  862. "movq (%%eax, %1, 2), %%mm6 \n\t" //l3
  863. "movq (%0, %1, 4), %%mm5 \n\t" //l4
  864. "movq (%0, %1, 4), %%mm4 \n\t" //l4
  865. "psubusb %%mm6, %%mm5 \n\t"
  866. "psubusb %%mm4, %%mm6 \n\t"
  867. "por %%mm6, %%mm5 \n\t" // |l3-l4|
  868. "pcmpeqb %%mm7, %%mm6 \n\t" // SIGN(l3-l4)
  869. "pxor %%mm6, %%mm0 \n\t"
  870. "pand %%mm0, %%mm3 \n\t"
  871. PMINUB(%%mm5, %%mm3, %%mm0)
  872. "psubusb b01, %%mm3 \n\t"
  873. PAVGB(%%mm7, %%mm3)
  874. "movq (%%eax, %1, 2), %%mm0 \n\t"
  875. "movq (%0, %1, 4), %%mm2 \n\t"
  876. "pxor %%mm6, %%mm0 \n\t"
  877. "pxor %%mm6, %%mm2 \n\t"
  878. "psubb %%mm3, %%mm0 \n\t"
  879. "paddb %%mm3, %%mm2 \n\t"
  880. "pxor %%mm6, %%mm0 \n\t"
  881. "pxor %%mm6, %%mm2 \n\t"
  882. "movq %%mm0, (%%eax, %1, 2) \n\t"
  883. "movq %%mm2, (%0, %1, 4) \n\t"
  884. #endif
  885. "leal (%0, %1), %%eax \n\t"
  886. "pcmpeqb %%mm6, %%mm6 \n\t" // -1
  887. // 0 1 2 3 4 5 6 7
  888. // %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ebx+%1 ebx+2%1
  889. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1
  890. "movq (%%eax, %1, 2), %%mm1 \n\t" // l3
  891. "movq (%0, %1, 4), %%mm0 \n\t" // l4
  892. "pxor %%mm6, %%mm1 \n\t" // -l3-1
  893. PAVGB(%%mm1, %%mm0) // -q+128 = (l4-l3+256)/2
  894. // mm1=-l3-1, mm0=128-q
  895. "movq (%%eax, %1, 4), %%mm2 \n\t" // l5
  896. "movq (%%eax, %1), %%mm3 \n\t" // l2
  897. "pxor %%mm6, %%mm2 \n\t" // -l5-1
  898. "movq %%mm2, %%mm5 \n\t" // -l5-1
  899. "movq b80, %%mm4 \n\t" // 128
  900. "leal (%%eax, %1, 4), %%ebx \n\t"
  901. PAVGB(%%mm3, %%mm2) // (l2-l5+256)/2
  902. PAVGB(%%mm0, %%mm4) // ~(l4-l3)/4 + 128
  903. PAVGB(%%mm2, %%mm4) // ~(l2-l5)/4 +(l4-l3)/8 + 128
  904. PAVGB(%%mm0, %%mm4) // ~(l2-l5)/8 +5(l4-l3)/16 + 128
  905. // mm1=-l3-1, mm0=128-q, mm3=l2, mm4=menergy/16 + 128, mm5= -l5-1
  906. "movq (%%eax), %%mm2 \n\t" // l1
  907. "pxor %%mm6, %%mm2 \n\t" // -l1-1
  908. PAVGB(%%mm3, %%mm2) // (l2-l1+256)/2
  909. PAVGB((%0), %%mm1) // (l0-l3+256)/2
  910. "movq b80, %%mm3 \n\t" // 128
  911. PAVGB(%%mm2, %%mm3) // ~(l2-l1)/4 + 128
  912. PAVGB(%%mm1, %%mm3) // ~(l0-l3)/4 +(l2-l1)/8 + 128
  913. PAVGB(%%mm2, %%mm3) // ~(l0-l3)/8 +5(l2-l1)/16 + 128
  914. // mm0=128-q, mm3=lenergy/16 + 128, mm4= menergy/16 + 128, mm5= -l5-1
  915. PAVGB((%%ebx, %1), %%mm5) // (l6-l5+256)/2
  916. "movq (%%ebx, %1, 2), %%mm1 \n\t" // l7
  917. "pxor %%mm6, %%mm1 \n\t" // -l7-1
  918. PAVGB((%0, %1, 4), %%mm1) // (l4-l7+256)/2
  919. "movq b80, %%mm2 \n\t" // 128
  920. PAVGB(%%mm5, %%mm2) // ~(l6-l5)/4 + 128
  921. PAVGB(%%mm1, %%mm2) // ~(l4-l7)/4 +(l6-l5)/8 + 128
  922. PAVGB(%%mm5, %%mm2) // ~(l4-l7)/8 +5(l6-l5)/16 + 128
  923. // mm0=128-q, mm2=renergy/16 + 128, mm3=lenergy/16 + 128, mm4= menergy/16 + 128
  924. "movq b00, %%mm1 \n\t" // 0
  925. "movq b00, %%mm5 \n\t" // 0
  926. "psubb %%mm2, %%mm1 \n\t" // 128 - renergy/16
  927. "psubb %%mm3, %%mm5 \n\t" // 128 - lenergy/16
  928. PMAXUB(%%mm1, %%mm2) // 128 + |renergy/16|
  929. PMAXUB(%%mm5, %%mm3) // 128 + |lenergy/16|
  930. PMINUB(%%mm2, %%mm3, %%mm1) // 128 + MIN(|lenergy|,|renergy|)/16
  931. // mm0=128-q, mm3=128 + MIN(|lenergy|,|renergy|)/16, mm4= menergy/16 + 128
  932. "movq b00, %%mm7 \n\t" // 0
  933. "movq pQPb, %%mm2 \n\t" // QP
  934. PAVGB(%%mm6, %%mm2) // 128 + QP/2
  935. "psubb %%mm6, %%mm2 \n\t"
  936. "movq %%mm4, %%mm1 \n\t"
  937. "pcmpgtb %%mm7, %%mm1 \n\t" // SIGN(menergy)
  938. "pxor %%mm1, %%mm4 \n\t"
  939. "psubb %%mm1, %%mm4 \n\t" // 128 + |menergy|/16
  940. "pcmpgtb %%mm4, %%mm2 \n\t" // |menergy|/16 < QP/2
  941. "psubusb %%mm3, %%mm4 \n\t" //d=|menergy|/16 - MIN(|lenergy|,|renergy|)/16
  942. // mm0=128-q, mm1= SIGN(menergy), mm2= |menergy|/16 < QP/2, mm4= d/16
  943. "movq %%mm4, %%mm3 \n\t" // d
  944. "psubusb b01, %%mm4 \n\t"
  945. PAVGB(%%mm7, %%mm4) // d/32
  946. PAVGB(%%mm7, %%mm4) // (d + 32)/64
  947. "paddb %%mm3, %%mm4 \n\t" // 5d/64
  948. "pand %%mm2, %%mm4 \n\t"
  949. "movq b80, %%mm5 \n\t" // 128
  950. "psubb %%mm0, %%mm5 \n\t" // q
  951. "paddsb %%mm6, %%mm5 \n\t" // fix bad rounding
  952. "pcmpgtb %%mm5, %%mm7 \n\t" // SIGN(q)
  953. "pxor %%mm7, %%mm5 \n\t"
  954. PMINUB(%%mm5, %%mm4, %%mm3) // MIN(|q|, 5d/64)
  955. "pxor %%mm1, %%mm7 \n\t" // SIGN(d*q)
  956. "pand %%mm7, %%mm4 \n\t"
  957. "movq (%%eax, %1, 2), %%mm0 \n\t"
  958. "movq (%0, %1, 4), %%mm2 \n\t"
  959. "pxor %%mm1, %%mm0 \n\t"
  960. "pxor %%mm1, %%mm2 \n\t"
  961. "paddb %%mm4, %%mm0 \n\t"
  962. "psubb %%mm4, %%mm2 \n\t"
  963. "pxor %%mm1, %%mm0 \n\t"
  964. "pxor %%mm1, %%mm2 \n\t"
  965. "movq %%mm0, (%%eax, %1, 2) \n\t"
  966. "movq %%mm2, (%0, %1, 4) \n\t"
  967. :
  968. : "r" (src), "r" (stride)
  969. : "%eax", "%ebx"
  970. );
  971. /*
  972. {
  973. int x;
  974. src-= stride;
  975. for(x=0; x<BLOCK_SIZE; x++)
  976. {
  977. const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
  978. if(ABS(middleEnergy)< 8*QP)
  979. {
  980. const int q=(src[l4] - src[l5])/2;
  981. const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
  982. const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
  983. int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
  984. d= MAX(d, 0);
  985. d= (5*d + 32) >> 6;
  986. d*= SIGN(-middleEnergy);
  987. if(q>0)
  988. {
  989. d= d<0 ? 0 : d;
  990. d= d>q ? q : d;
  991. }
  992. else
  993. {
  994. d= d>0 ? 0 : d;
  995. d= d<q ? q : d;
  996. }
  997. src[l4]-= d;
  998. src[l5]+= d;
  999. }
  1000. src++;
  1001. }
  1002. src-=8;
  1003. for(x=0; x<8; x++)
  1004. {
  1005. int y;
  1006. for(y=4; y<6; y++)
  1007. {
  1008. int d= src[x+y*stride] - tmp[x+(y-4)*8];
  1009. int ad= ABS(d);
  1010. static int max=0;
  1011. static int sum=0;
  1012. static int num=0;
  1013. static int bias=0;
  1014. if(max<ad) max=ad;
  1015. sum+= ad>3 ? 1 : 0;
  1016. if(ad>3)
  1017. {
  1018. src[0] = src[7] = src[stride*7] = src[(stride+1)*7]=255;
  1019. }
  1020. if(y==4) bias+=d;
  1021. num++;
  1022. if(num%1000000 == 0)
  1023. {
  1024. printf(" %d %d %d %d\n", num, sum, max, bias);
  1025. }
  1026. }
  1027. }
  1028. }
  1029. */
  1030. #elif defined (HAVE_MMX)
  1031. src+= stride*4;
  1032. asm volatile(
  1033. "pxor %%mm7, %%mm7 \n\t"
  1034. "leal (%0, %1), %%eax \n\t"
  1035. "leal (%%eax, %1, 4), %%ebx \n\t"
  1036. // 0 1 2 3 4 5 6 7
  1037. // %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ebx+%1 ebx+2%1
  1038. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1
  1039. "movq (%0), %%mm0 \n\t"
  1040. "movq %%mm0, %%mm1 \n\t"
  1041. "punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
  1042. "punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
  1043. "movq (%%eax), %%mm2 \n\t"
  1044. "movq %%mm2, %%mm3 \n\t"
  1045. "punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
  1046. "punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
  1047. "movq (%%eax, %1), %%mm4 \n\t"
  1048. "movq %%mm4, %%mm5 \n\t"
  1049. "punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
  1050. "punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
  1051. "paddw %%mm0, %%mm0 \n\t" // 2L0
  1052. "paddw %%mm1, %%mm1 \n\t" // 2H0
  1053. "psubw %%mm4, %%mm2 \n\t" // L1 - L2
  1054. "psubw %%mm5, %%mm3 \n\t" // H1 - H2
  1055. "psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
  1056. "psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2
  1057. "psllw $2, %%mm2 \n\t" // 4L1 - 4L2
  1058. "psllw $2, %%mm3 \n\t" // 4H1 - 4H2
  1059. "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
  1060. "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
  1061. "movq (%%eax, %1, 2), %%mm2 \n\t"
  1062. "movq %%mm2, %%mm3 \n\t"
  1063. "punpcklbw %%mm7, %%mm2 \n\t" // L3
  1064. "punpckhbw %%mm7, %%mm3 \n\t" // H3
  1065. "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
  1066. "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
  1067. "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
  1068. "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
  1069. "movq %%mm0, temp0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
  1070. "movq %%mm1, temp1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
  1071. "movq (%0, %1, 4), %%mm0 \n\t"
  1072. "movq %%mm0, %%mm1 \n\t"
  1073. "punpcklbw %%mm7, %%mm0 \n\t" // L4
  1074. "punpckhbw %%mm7, %%mm1 \n\t" // H4
  1075. "psubw %%mm0, %%mm2 \n\t" // L3 - L4
  1076. "psubw %%mm1, %%mm3 \n\t" // H3 - H4
  1077. "movq %%mm2, temp2 \n\t" // L3 - L4
  1078. "movq %%mm3, temp3 \n\t" // H3 - H4
  1079. "paddw %%mm4, %%mm4 \n\t" // 2L2
  1080. "paddw %%mm5, %%mm5 \n\t" // 2H2
  1081. "psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
  1082. "psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
  1083. "psllw $2, %%mm2 \n\t" // 4L3 - 4L4
  1084. "psllw $2, %%mm3 \n\t" // 4H3 - 4H4
  1085. "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
  1086. "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
  1087. //50 opcodes so far
  1088. "movq (%%ebx), %%mm2 \n\t"
  1089. "movq %%mm2, %%mm3 \n\t"
  1090. "punpcklbw %%mm7, %%mm2 \n\t" // L5
  1091. "punpckhbw %%mm7, %%mm3 \n\t" // H5
  1092. "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
  1093. "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
  1094. "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
  1095. "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
  1096. "movq (%%ebx, %1), %%mm6 \n\t"
  1097. "punpcklbw %%mm7, %%mm6 \n\t" // L6
  1098. "psubw %%mm6, %%mm2 \n\t" // L5 - L6
  1099. "movq (%%ebx, %1), %%mm6 \n\t"
  1100. "punpckhbw %%mm7, %%mm6 \n\t" // H6
  1101. "psubw %%mm6, %%mm3 \n\t" // H5 - H6
  1102. "paddw %%mm0, %%mm0 \n\t" // 2L4
  1103. "paddw %%mm1, %%mm1 \n\t" // 2H4
  1104. "psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
  1105. "psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6
  1106. "psllw $2, %%mm2 \n\t" // 4L5 - 4L6
  1107. "psllw $2, %%mm3 \n\t" // 4H5 - 4H6
  1108. "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
  1109. "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6
  1110. "movq (%%ebx, %1, 2), %%mm2 \n\t"
  1111. "movq %%mm2, %%mm3 \n\t"
  1112. "punpcklbw %%mm7, %%mm2 \n\t" // L7
  1113. "punpckhbw %%mm7, %%mm3 \n\t" // H7
  1114. "paddw %%mm2, %%mm2 \n\t" // 2L7
  1115. "paddw %%mm3, %%mm3 \n\t" // 2H7
  1116. "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
  1117. "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
  1118. "movq temp0, %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
  1119. "movq temp1, %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
  1120. #ifdef HAVE_MMX2
  1121. "movq %%mm7, %%mm6 \n\t" // 0
  1122. "psubw %%mm0, %%mm6 \n\t"
  1123. "pmaxsw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
  1124. "movq %%mm7, %%mm6 \n\t" // 0
  1125. "psubw %%mm1, %%mm6 \n\t"
  1126. "pmaxsw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
  1127. "movq %%mm7, %%mm6 \n\t" // 0
  1128. "psubw %%mm2, %%mm6 \n\t"
  1129. "pmaxsw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
  1130. "movq %%mm7, %%mm6 \n\t" // 0
  1131. "psubw %%mm3, %%mm6 \n\t"
  1132. "pmaxsw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
  1133. #else
  1134. "movq %%mm7, %%mm6 \n\t" // 0
  1135. "pcmpgtw %%mm0, %%mm6 \n\t"
  1136. "pxor %%mm6, %%mm0 \n\t"
  1137. "psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
  1138. "movq %%mm7, %%mm6 \n\t" // 0
  1139. "pcmpgtw %%mm1, %%mm6 \n\t"
  1140. "pxor %%mm6, %%mm1 \n\t"
  1141. "psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
  1142. "movq %%mm7, %%mm6 \n\t" // 0
  1143. "pcmpgtw %%mm2, %%mm6 \n\t"
  1144. "pxor %%mm6, %%mm2 \n\t"
  1145. "psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
  1146. "movq %%mm7, %%mm6 \n\t" // 0
  1147. "pcmpgtw %%mm3, %%mm6 \n\t"
  1148. "pxor %%mm6, %%mm3 \n\t"
  1149. "psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
  1150. #endif
  1151. #ifdef HAVE_MMX2
  1152. "pminsw %%mm2, %%mm0 \n\t"
  1153. "pminsw %%mm3, %%mm1 \n\t"
  1154. #else
  1155. "movq %%mm0, %%mm6 \n\t"
  1156. "psubusw %%mm2, %%mm6 \n\t"
  1157. "psubw %%mm6, %%mm0 \n\t"
  1158. "movq %%mm1, %%mm6 \n\t"
  1159. "psubusw %%mm3, %%mm6 \n\t"
  1160. "psubw %%mm6, %%mm1 \n\t"
  1161. #endif
  1162. "movq %%mm7, %%mm6 \n\t" // 0
  1163. "pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
  1164. "pxor %%mm6, %%mm4 \n\t"
  1165. "psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
  1166. "pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
  1167. "pxor %%mm7, %%mm5 \n\t"
  1168. "psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
  1169. // 100 opcodes
  1170. "movd %2, %%mm2 \n\t" // QP
  1171. "punpcklwd %%mm2, %%mm2 \n\t"
  1172. "punpcklwd %%mm2, %%mm2 \n\t"
  1173. "psllw $3, %%mm2 \n\t" // 8QP
  1174. "movq %%mm2, %%mm3 \n\t" // 8QP
  1175. "pcmpgtw %%mm4, %%mm2 \n\t"
  1176. "pcmpgtw %%mm5, %%mm3 \n\t"
  1177. "pand %%mm2, %%mm4 \n\t"
  1178. "pand %%mm3, %%mm5 \n\t"
  1179. "psubusw %%mm0, %%mm4 \n\t" // hd
  1180. "psubusw %%mm1, %%mm5 \n\t" // ld
  1181. "movq w05, %%mm2 \n\t" // 5
  1182. "pmullw %%mm2, %%mm4 \n\t"
  1183. "pmullw %%mm2, %%mm5 \n\t"
  1184. "movq w20, %%mm2 \n\t" // 32
  1185. "paddw %%mm2, %%mm4 \n\t"
  1186. "paddw %%mm2, %%mm5 \n\t"
  1187. "psrlw $6, %%mm4 \n\t"
  1188. "psrlw $6, %%mm5 \n\t"
  1189. /*
  1190. "movq w06, %%mm2 \n\t" // 6
  1191. "paddw %%mm2, %%mm4 \n\t"
  1192. "paddw %%mm2, %%mm5 \n\t"
  1193. "movq w1400, %%mm2 \n\t" // 1400h = 5120 = 5/64*2^16
  1194. //FIXME if *5/64 is supposed to be /13 then we should use 5041 instead of 5120
  1195. "pmulhw %%mm2, %%mm4 \n\t" // hd/13
  1196. "pmulhw %%mm2, %%mm5 \n\t" // ld/13
  1197. */
  1198. "movq temp2, %%mm0 \n\t" // L3 - L4
  1199. "movq temp3, %%mm1 \n\t" // H3 - H4
  1200. "pxor %%mm2, %%mm2 \n\t"
  1201. "pxor %%mm3, %%mm3 \n\t"
  1202. "pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
  1203. "pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
  1204. "pxor %%mm2, %%mm0 \n\t"
  1205. "pxor %%mm3, %%mm1 \n\t"
  1206. "psubw %%mm2, %%mm0 \n\t" // |L3-L4|
  1207. "psubw %%mm3, %%mm1 \n\t" // |H3-H4|
  1208. "psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
  1209. "psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
  1210. "pxor %%mm6, %%mm2 \n\t"
  1211. "pxor %%mm7, %%mm3 \n\t"
  1212. "pand %%mm2, %%mm4 \n\t"
  1213. "pand %%mm3, %%mm5 \n\t"
  1214. #ifdef HAVE_MMX2
  1215. "pminsw %%mm0, %%mm4 \n\t"
  1216. "pminsw %%mm1, %%mm5 \n\t"
  1217. #else
  1218. "movq %%mm4, %%mm2 \n\t"
  1219. "psubusw %%mm0, %%mm2 \n\t"
  1220. "psubw %%mm2, %%mm4 \n\t"
  1221. "movq %%mm5, %%mm2 \n\t"
  1222. "psubusw %%mm1, %%mm2 \n\t"
  1223. "psubw %%mm2, %%mm5 \n\t"
  1224. #endif
  1225. "pxor %%mm6, %%mm4 \n\t"
  1226. "pxor %%mm7, %%mm5 \n\t"
  1227. "psubw %%mm6, %%mm4 \n\t"
  1228. "psubw %%mm7, %%mm5 \n\t"
  1229. "packsswb %%mm5, %%mm4 \n\t"
  1230. "movq (%%eax, %1, 2), %%mm0 \n\t"
  1231. "paddb %%mm4, %%mm0 \n\t"
  1232. "movq %%mm0, (%%eax, %1, 2) \n\t"
  1233. "movq (%0, %1, 4), %%mm0 \n\t"
  1234. "psubb %%mm4, %%mm0 \n\t"
  1235. "movq %%mm0, (%0, %1, 4) \n\t"
  1236. :
  1237. : "r" (src), "r" (stride), "r" (QP)
  1238. : "%eax", "%ebx"
  1239. );
  1240. #else
  1241. const int l1= stride;
  1242. const int l2= stride + l1;
  1243. const int l3= stride + l2;
  1244. const int l4= stride + l3;
  1245. const int l5= stride + l4;
  1246. const int l6= stride + l5;
  1247. const int l7= stride + l6;
  1248. const int l8= stride + l7;
  1249. // const int l9= stride + l8;
  1250. int x;
  1251. src+= stride*3;
  1252. for(x=0; x<BLOCK_SIZE; x++)
  1253. {
  1254. const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
  1255. if(ABS(middleEnergy) < 8*QP)
  1256. {
  1257. const int q=(src[l4] - src[l5])/2;
  1258. const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
  1259. const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
  1260. int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
  1261. d= MAX(d, 0);
  1262. d= (5*d + 32) >> 6;
  1263. d*= SIGN(-middleEnergy);
  1264. if(q>0)
  1265. {
  1266. d= d<0 ? 0 : d;
  1267. d= d>q ? q : d;
  1268. }
  1269. else
  1270. {
  1271. d= d>0 ? 0 : d;
  1272. d= d<q ? q : d;
  1273. }
  1274. src[l4]-= d;
  1275. src[l5]+= d;
  1276. }
  1277. src++;
  1278. }
  1279. #endif
  1280. }
/**
 * Basic deringing filter for one block.
 * Finds the min and max pixel value of the block, uses a = (max+min)/2 as a
 * threshold, and where a pixel's whole 3x3 neighbourhood lies on one side of
 * that threshold replaces the pixel by a 3x3 weighted average; the change is
 * limited to +/- 2*QP of the original value.
 * (The decision logic is easiest to follow in the C reference version below.)
 */
static inline void dering(uint8_t src[], int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
// pQPb2 = 2*QP, the clip range applied to the filtered value
"movq pQPb, %%mm0 \n\t"
"paddusb %%mm0, %%mm0 \n\t"
"movq %%mm0, pQPb2 \n\t"
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
// mm6 = running bytewise min (starts at 255), mm7 = running max (starts at 0)
"pcmpeqb %%mm6, %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
#ifdef HAVE_MMX2
#define FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"pminub %%mm0, %%mm6 \n\t"\
"pmaxub %%mm0, %%mm7 \n\t"
#else
// without pminub/pmaxub: min(m,x) = m - satsub(m,x), max(m,x) = satsub(m,x) + x
#define FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"movq %%mm6, %%mm1 \n\t"\
"psubusb %%mm0, %%mm7 \n\t"\
"paddb %%mm0, %%mm7 \n\t"\
"psubusb %%mm0, %%mm1 \n\t"\
"psubb %%mm1, %%mm6 \n\t"
#endif
FIND_MIN_MAX((%%eax))
FIND_MIN_MAX((%%eax, %1))
FIND_MIN_MAX((%%eax, %1, 2))
FIND_MIN_MAX((%0, %1, 4))
FIND_MIN_MAX((%%ebx))
FIND_MIN_MAX((%%ebx, %1))
FIND_MIN_MAX((%%ebx, %1, 2))
FIND_MIN_MAX((%0, %1, 8))
// horizontal reduction of the 8 per-column minima in mm6
"movq %%mm6, %%mm4 \n\t"
"psrlq $8, %%mm6 \n\t"
#ifdef HAVE_MMX2
"pminub %%mm4, %%mm6 \n\t" // min of pixels
"pshufw $0xF9, %%mm6, %%mm4 \n\t"
"pminub %%mm4, %%mm6 \n\t" // min of pixels
"pshufw $0xFE, %%mm6, %%mm4 \n\t"
"pminub %%mm4, %%mm6 \n\t"
#else
"movq %%mm6, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm6 \n\t"
"movq %%mm6, %%mm4 \n\t"
"psrlq $16, %%mm6 \n\t"
"movq %%mm6, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm6 \n\t"
"movq %%mm6, %%mm4 \n\t"
"psrlq $32, %%mm6 \n\t"
"movq %%mm6, %%mm1 \n\t"
"psubusb %%mm4, %%mm1 \n\t"
"psubb %%mm1, %%mm6 \n\t"
#endif
// horizontal reduction of the 8 per-column maxima in mm7
"movq %%mm7, %%mm4 \n\t"
"psrlq $8, %%mm7 \n\t"
#ifdef HAVE_MMX2
"pmaxub %%mm4, %%mm7 \n\t" // max of pixels
"pshufw $0xF9, %%mm7, %%mm4 \n\t"
"pmaxub %%mm4, %%mm7 \n\t"
"pshufw $0xFE, %%mm7, %%mm4 \n\t"
"pmaxub %%mm4, %%mm7 \n\t"
#else
"psubusb %%mm4, %%mm7 \n\t"
"paddb %%mm4, %%mm7 \n\t"
"movq %%mm7, %%mm4 \n\t"
"psrlq $16, %%mm7 \n\t"
"psubusb %%mm4, %%mm7 \n\t"
"paddb %%mm4, %%mm7 \n\t"
"movq %%mm7, %%mm4 \n\t"
"psrlq $32, %%mm7 \n\t"
"psubusb %%mm4, %%mm7 \n\t"
"paddb %%mm4, %%mm7 \n\t"
#endif
PAVGB(%%mm6, %%mm7) // a=(max + min)/2
// broadcast the threshold a to all 8 bytes and spill it to temp0
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"movq %%mm7, temp0 \n\t"
// prologue: classify line 0 -- mm0 accumulates a -1 for each of
// src[-1], src[0], src[+1] that is <= a
"movq (%0), %%mm0 \n\t" // L10
"movq %%mm0, %%mm1 \n\t" // L10
"movq %%mm0, %%mm2 \n\t" // L10
"psllq $8, %%mm1 \n\t"
"psrlq $8, %%mm2 \n\t"
"movd -4(%0), %%mm3 \n\t"
"movd 8(%0), %%mm4 \n\t"
"psrlq $24, %%mm3 \n\t"
"psllq $56, %%mm4 \n\t"
"por %%mm3, %%mm1 \n\t" // L00
"por %%mm4, %%mm2 \n\t" // L20
"movq %%mm1, %%mm3 \n\t" // L00
PAVGB(%%mm2, %%mm1) // (L20 + L00)/2
PAVGB(%%mm0, %%mm1) // (L20 + L00 + 2L10)/4
"psubusb %%mm7, %%mm0 \n\t"
"psubusb %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm3 \n\t"
"pcmpeqb b00, %%mm0 \n\t" // L10 > a ? 0 : -1
"pcmpeqb b00, %%mm2 \n\t" // L20 > a ? 0 : -1
"pcmpeqb b00, %%mm3 \n\t" // L00 > a ? 0 : -1
"paddb %%mm2, %%mm0 \n\t"
"paddb %%mm3, %%mm0 \n\t"
// prologue: same classification for line 1 into mm2
"movq (%%eax), %%mm2 \n\t" // L11
"movq %%mm2, %%mm3 \n\t" // L11
"movq %%mm2, %%mm4 \n\t" // L11
"psllq $8, %%mm3 \n\t"
"psrlq $8, %%mm4 \n\t"
"movd -4(%%eax), %%mm5 \n\t"
"movd 8(%%eax), %%mm6 \n\t"
"psrlq $24, %%mm5 \n\t"
"psllq $56, %%mm6 \n\t"
"por %%mm5, %%mm3 \n\t" // L01
"por %%mm6, %%mm4 \n\t" // L21
"movq %%mm3, %%mm5 \n\t" // L01
PAVGB(%%mm4, %%mm3) // (L21 + L01)/2
PAVGB(%%mm2, %%mm3) // (L21 + L01 + 2L11)/4
"psubusb %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm4 \n\t"
"psubusb %%mm7, %%mm5 \n\t"
"pcmpeqb b00, %%mm2 \n\t" // L11 > a ? 0 : -1
"pcmpeqb b00, %%mm4 \n\t" // L21 > a ? 0 : -1
"pcmpeqb b00, %%mm5 \n\t" // L01 > a ? 0 : -1
"paddb %%mm4, %%mm2 \n\t"
"paddb %%mm5, %%mm2 \n\t"
// 0, 2, 3, 1
// DERING_CORE filters one line: builds the 3x3 smoothed value (vertical
// average of three horizontal averages), clips it to dst +/- 2QP, and
// stores it only where the accumulated compare masks (ppsx) allow;
// t0/t1 are scratch registers
#define DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
"movq " #src ", " #sx " \n\t" /* src[0] */\
"movq " #sx ", " #lx " \n\t" /* src[0] */\
"movq " #sx ", " #t0 " \n\t" /* src[0] */\
"psllq $8, " #lx " \n\t"\
"psrlq $8, " #t0 " \n\t"\
"movd -4" #src ", " #t1 " \n\t"\
"psrlq $24, " #t1 " \n\t"\
"por " #t1 ", " #lx " \n\t" /* src[-1] */\
"movd 8" #src ", " #t1 " \n\t"\
"psllq $56, " #t1 " \n\t"\
"por " #t1 ", " #t0 " \n\t" /* src[+1] */\
"movq " #lx ", " #t1 " \n\t" /* src[-1] */\
PAVGB(t0, lx) /* (src[-1] + src[+1])/2 */\
PAVGB(sx, lx) /* (src[-1] + 2src[0] + src[+1])/4 */\
PAVGB(lx, pplx) \
"movq " #lx ", temp1 \n\t"\
"movq temp0, " #lx " \n\t"\
"psubusb " #lx ", " #t1 " \n\t"\
"psubusb " #lx ", " #t0 " \n\t"\
"psubusb " #lx ", " #sx " \n\t"\
"movq b00, " #lx " \n\t"\
"pcmpeqb " #lx ", " #t1 " \n\t" /* src[-1] > a ? 0 : -1*/\
"pcmpeqb " #lx ", " #t0 " \n\t" /* src[+1] > a ? 0 : -1*/\
"pcmpeqb " #lx ", " #sx " \n\t" /* src[0] > a ? 0 : -1*/\
"paddb " #t1 ", " #t0 " \n\t"\
"paddb " #t0 ", " #sx " \n\t"\
\
PAVGB(plx, pplx) /* filtered */\
"movq " #dst ", " #t0 " \n\t" /* dst */\
"movq " #t0 ", " #t1 " \n\t" /* dst */\
"psubusb pQPb2, " #t0 " \n\t"\
"paddusb pQPb2, " #t1 " \n\t"\
PMAXUB(t0, pplx)\
PMINUB(t1, pplx, t0)\
"paddb " #sx ", " #ppsx " \n\t"\
"paddb " #psx ", " #ppsx " \n\t"\
"#paddb b02, " #ppsx " \n\t"\
"pand b08, " #ppsx " \n\t"\
"pcmpeqb " #lx ", " #ppsx " \n\t"\
"pand " #ppsx ", " #pplx " \n\t"\
"pandn " #dst ", " #ppsx " \n\t"\
"por " #pplx ", " #ppsx " \n\t"\
"movq " #ppsx ", " #dst " \n\t"\
"movq temp1, " #lx " \n\t"
/*
0000000
1111111
1111110
1111101
1111100
1111011
1111010
1111001
1111000
1110111
*/
// the 8 calls rotate the register roles (ppsx/psx/sx and pplx/plx/lx)
// so each line's classification is reused by the next two calls
//DERING_CORE(dst,src ,ppsx ,psx ,sx ,pplx ,plx ,lx ,t0 ,t1)
DERING_CORE((%%eax),(%%eax, %1) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%eax, %1),(%%eax, %1, 2) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%eax, %1, 2),(%0, %1, 4) ,%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%0, %1, 4),(%%ebx) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%ebx),(%%ebx, %1) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%ebx, %1), (%%ebx, %1, 2),%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%%ebx, %1, 2),(%0, %1, 8) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%0, %1, 8),(%%ebx, %1, 4) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
: : "r" (src), "r" (stride), "r" (QP)
: "%eax", "%ebx"
);
#else
// C reference implementation, operating on a 10x10 region (8x8 block
// plus a 1-pixel border that is read but not modified)
int y;
int min=255;
int max=0;
int avg;
uint8_t *p;
int s[10];
// pass 1: min/max of the inner 8x8 pixels
for(y=1; y<9; y++)
{
int x;
p= src + stride*y;
for(x=1; x<9; x++)
{
p++;
if(*p > max) max= *p;
if(*p < min) min= *p;
}
}
avg= (min + max + 1)/2;
// pass 2: per line build a bitmask -- after the shifts/ANDs, low bit x is
// set iff pixels x-1, x, x+1 are all > avg, and bit 16+x iff all are <= avg
for(y=0; y<10; y++)
{
int x;
int t = 0;
p= src + stride*y;
for(x=0; x<10; x++)
{
if(*p > avg) t |= (1<<x);
p++;
}
t |= (~t)<<16;
t &= (t<<1) & (t>>1);
s[y] = t;
}
// pass 3: where the whole 3x3 neighbourhood lies on one side of avg,
// replace the pixel by a 3x3 binomial average, limited to +/- 2*QP
for(y=1; y<9; y++)
{
int x;
int t = s[y-1] & s[y] & s[y+1];
t|= t>>16;
p= src + stride*y;
for(x=1; x<9; x++)
{
p++;
if(t & (1<<x))
{
int f= (*(p-stride-1)) + 2*(*(p-stride)) + (*(p-stride+1))
+2*(*(p -1)) + 4*(*p ) + 2*(*(p +1))
+(*(p+stride-1)) + 2*(*(p+stride)) + (*(p+stride+1));
f= (f + 8)>>4;
if (*p + 2*QP < f) *p= *p + 2*QP;
else if(*p - 2*QP > f) *p= *p - 2*QP;
else *p=f;
}
}
}
#endif
}
/**
 * Deinterlaces the given block by linear interpolation:
 * each odd line (5,7,9,11) is replaced by the average of its two even
 * neighbours; the even lines are left untouched
 * will be called for every 8x8 block and can read & write from line 4-15
 * lines 0-3 have already been passed through the deblock / dering filters, but can be read too
 * lines 4-12 will be read into the deblocking filter and should be deinterlaced
 */
static inline void deInterlaceInterpolateLinear(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= 4*stride;
asm volatile(
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
// line 1 = avg(line 0, line 2)
"movq (%0), %%mm0 \n\t"
"movq (%%eax, %1), %%mm1 \n\t"
PAVGB(%%mm1, %%mm0)
"movq %%mm0, (%%eax) \n\t"
// line 3 = avg(line 2, line 4)
"movq (%0, %1, 4), %%mm0 \n\t"
PAVGB(%%mm0, %%mm1)
"movq %%mm1, (%%eax, %1, 2) \n\t"
// line 5 = avg(line 4, line 6)
"movq (%%ebx, %1), %%mm1 \n\t"
PAVGB(%%mm1, %%mm0)
"movq %%mm0, (%%ebx) \n\t"
// line 7 = avg(line 6, line 8)
"movq (%0, %1, 8), %%mm0 \n\t"
PAVGB(%%mm0, %%mm1)
"movq %%mm1, (%%ebx, %1, 2) \n\t"
: : "r" (src), "r" (stride)
: "%eax", "%ebx"
);
#else
// C reference: same averaging, truncating instead of PAVGB's rounding
int x;
src+= 4*stride;
for(x=0; x<8; x++)
{
src[stride] = (src[0] + src[stride*2])>>1;
src[stride*3] = (src[stride*2] + src[stride*4])>>1;
src[stride*5] = (src[stride*4] + src[stride*6])>>1;
src[stride*7] = (src[stride*6] + src[stride*8])>>1;
src++;
}
#endif
}
  1578. /**
  1579. * Deinterlaces the given block
  1580. * will be called for every 8x8 block and can read & write from line 4-15
  1581. * lines 0-3 have been passed through the deblock / dering filters allready, but can be read too
  1582. * lines 4-12 will be read into the deblocking filter and should be deinterlaced
  1583. * this filter will read lines 3-15 and write 7-13
  1584. * no cliping in C version
  1585. */
  1586. static inline void deInterlaceInterpolateCubic(uint8_t src[], int stride)
  1587. {
  1588. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1589. src+= stride*3;
  1590. asm volatile(
  1591. "leal (%0, %1), %%eax \n\t"
  1592. "leal (%%eax, %1, 4), %%ebx \n\t"
  1593. "leal (%%ebx, %1, 4), %%ecx \n\t"
  1594. "addl %1, %%ecx \n\t"
  1595. "pxor %%mm7, %%mm7 \n\t"
  1596. // 0 1 2 3 4 5 6 7 8 9 10
  1597. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1 ecx
  1598. #define DEINT_CUBIC(a,b,c,d,e)\
  1599. "movq " #a ", %%mm0 \n\t"\
  1600. "movq " #b ", %%mm1 \n\t"\
  1601. "movq " #d ", %%mm2 \n\t"\
  1602. "movq " #e ", %%mm3 \n\t"\
  1603. PAVGB(%%mm2, %%mm1) /* (b+d) /2 */\
  1604. PAVGB(%%mm3, %%mm0) /* a(a+e) /2 */\
  1605. "movq %%mm0, %%mm2 \n\t"\
  1606. "punpcklbw %%mm7, %%mm0 \n\t"\
  1607. "punpckhbw %%mm7, %%mm2 \n\t"\
  1608. "movq %%mm1, %%mm3 \n\t"\
  1609. "punpcklbw %%mm7, %%mm1 \n\t"\
  1610. "punpckhbw %%mm7, %%mm3 \n\t"\
  1611. "psubw %%mm1, %%mm0 \n\t" /* L(a+e - (b+d))/2 */\
  1612. "psubw %%mm3, %%mm2 \n\t" /* H(a+e - (b+d))/2 */\
  1613. "psraw $3, %%mm0 \n\t" /* L(a+e - (b+d))/16 */\
  1614. "psraw $3, %%mm2 \n\t" /* H(a+e - (b+d))/16 */\
  1615. "psubw %%mm0, %%mm1 \n\t" /* L(9b + 9d - a - e)/16 */\
  1616. "psubw %%mm2, %%mm3 \n\t" /* H(9b + 9d - a - e)/16 */\
  1617. "packuswb %%mm3, %%mm1 \n\t"\
  1618. "movq %%mm1, " #c " \n\t"
  1619. DEINT_CUBIC((%0), (%%eax, %1), (%%eax, %1, 2), (%0, %1, 4), (%%ebx, %1))
  1620. DEINT_CUBIC((%%eax, %1), (%0, %1, 4), (%%ebx), (%%ebx, %1), (%0, %1, 8))
  1621. DEINT_CUBIC((%0, %1, 4), (%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8), (%%ecx))
  1622. DEINT_CUBIC((%%ebx, %1), (%0, %1, 8), (%%ebx, %1, 4), (%%ecx), (%%ecx, %1, 2))
  1623. : : "r" (src), "r" (stride)
  1624. : "%eax", "%ebx", "ecx"
  1625. );
  1626. #else
  1627. int x;
  1628. src+= stride*3;
  1629. for(x=0; x<8; x++)
  1630. {
  1631. src[stride*3] = (-src[0] + 9*src[stride*2] + 9*src[stride*4] - src[stride*6])>>4;
  1632. src[stride*5] = (-src[stride*2] + 9*src[stride*4] + 9*src[stride*6] - src[stride*8])>>4;
  1633. src[stride*7] = (-src[stride*4] + 9*src[stride*6] + 9*src[stride*8] - src[stride*10])>>4;
  1634. src[stride*9] = (-src[stride*6] + 9*src[stride*8] + 9*src[stride*10] - src[stride*12])>>4;
  1635. src++;
  1636. }
  1637. #endif
  1638. }
/**
 * Deinterlaces the given block by blending consecutive lines:
 * line[n] = (line[n] + 2*line[n+1] + line[n+2])/4, so the result is
 * effectively shifted up by 1 line (FIXME if this is a problem)
 * will be called for every 8x8 block and can read & write from line 4-15
 * lines 0-3 have already been passed through the deblock / dering filters, but can be read too
 * lines 4-12 will be read into the deblocking filter and should be deinterlaced
 * this filter will read lines 4-13 and write 4-11
 */
static inline void deInterlaceBlendLinear(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= 4*stride;
asm volatile(
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
// each output line n is built as avg(avg(L(n), L(n+2)), L(n+1)),
// keeping the two lines that the next step still needs in registers
"movq (%0), %%mm0 \n\t" // L0
"movq (%%eax, %1), %%mm1 \n\t" // L2
PAVGB(%%mm1, %%mm0) // L0+L2
"movq (%%eax), %%mm2 \n\t" // L1
PAVGB(%%mm2, %%mm0)
"movq %%mm0, (%0) \n\t"
"movq (%%eax, %1, 2), %%mm0 \n\t" // L3
PAVGB(%%mm0, %%mm2) // L1+L3
PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
"movq %%mm2, (%%eax) \n\t"
"movq (%0, %1, 4), %%mm2 \n\t" // L4
PAVGB(%%mm2, %%mm1) // L2+L4
PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
"movq %%mm1, (%%eax, %1) \n\t"
"movq (%%ebx), %%mm1 \n\t" // L5
PAVGB(%%mm1, %%mm0) // L3+L5
PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
"movq %%mm0, (%%eax, %1, 2) \n\t"
"movq (%%ebx, %1), %%mm0 \n\t" // L6
PAVGB(%%mm0, %%mm2) // L4+L6
PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
"movq %%mm2, (%0, %1, 4) \n\t"
"movq (%%ebx, %1, 2), %%mm2 \n\t" // L7
PAVGB(%%mm2, %%mm1) // L5+L7
PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
"movq %%mm1, (%%ebx) \n\t"
"movq (%0, %1, 8), %%mm1 \n\t" // L8
PAVGB(%%mm1, %%mm0) // L6+L8
PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8
"movq %%mm0, (%%ebx, %1) \n\t"
"movq (%%ebx, %1, 4), %%mm0 \n\t" // L9
PAVGB(%%mm0, %%mm2) // L7+L9
PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9
"movq %%mm2, (%%ebx, %1, 2) \n\t"
: : "r" (src), "r" (stride)
: "%eax", "%ebx"
);
#else
// C reference: line n reads only lines n, n+1, n+2, none of which has
// been overwritten yet when processing top-down, so in-place is safe
int x;
src+= 4*stride;
for(x=0; x<8; x++)
{
src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
src++;
}
#endif
}
  1710. /**
  1711. * Deinterlaces the given block
  1712. * will be called for every 8x8 block and can read & write from line 4-15,
  1713. * lines 0-3 have been passed through the deblock / dering filters allready, but can be read too
  1714. * lines 4-12 will be read into the deblocking filter and should be deinterlaced
  1715. */
  1716. static inline void deInterlaceMedian(uint8_t src[], int stride)
  1717. {
  1718. #ifdef HAVE_MMX
  1719. src+= 4*stride;
  1720. #ifdef HAVE_MMX2
  1721. asm volatile(
  1722. "leal (%0, %1), %%eax \n\t"
  1723. "leal (%%eax, %1, 4), %%ebx \n\t"
  1724. // 0 1 2 3 4 5 6 7 8 9
  1725. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1726. "movq (%0), %%mm0 \n\t" //
  1727. "movq (%%eax, %1), %%mm2 \n\t" //
  1728. "movq (%%eax), %%mm1 \n\t" //
  1729. "movq %%mm0, %%mm3 \n\t"
  1730. "pmaxub %%mm1, %%mm0 \n\t" //
  1731. "pminub %%mm3, %%mm1 \n\t" //
  1732. "pmaxub %%mm2, %%mm1 \n\t" //
  1733. "pminub %%mm1, %%mm0 \n\t"
  1734. "movq %%mm0, (%%eax) \n\t"
  1735. "movq (%0, %1, 4), %%mm0 \n\t" //
  1736. "movq (%%eax, %1, 2), %%mm1 \n\t" //
  1737. "movq %%mm2, %%mm3 \n\t"
  1738. "pmaxub %%mm1, %%mm2 \n\t" //
  1739. "pminub %%mm3, %%mm1 \n\t" //
  1740. "pmaxub %%mm0, %%mm1 \n\t" //
  1741. "pminub %%mm1, %%mm2 \n\t"
  1742. "movq %%mm2, (%%eax, %1, 2) \n\t"
  1743. "movq (%%ebx), %%mm2 \n\t" //
  1744. "movq (%%ebx, %1), %%mm1 \n\t" //
  1745. "movq %%mm2, %%mm3 \n\t"
  1746. "pmaxub %%mm0, %%mm2 \n\t" //
  1747. "pminub %%mm3, %%mm0 \n\t" //
  1748. "pmaxub %%mm1, %%mm0 \n\t" //
  1749. "pminub %%mm0, %%mm2 \n\t"
  1750. "movq %%mm2, (%%ebx) \n\t"
  1751. "movq (%%ebx, %1, 2), %%mm2 \n\t" //
  1752. "movq (%0, %1, 8), %%mm0 \n\t" //
  1753. "movq %%mm2, %%mm3 \n\t"
  1754. "pmaxub %%mm0, %%mm2 \n\t" //
  1755. "pminub %%mm3, %%mm0 \n\t" //
  1756. "pmaxub %%mm1, %%mm0 \n\t" //
  1757. "pminub %%mm0, %%mm2 \n\t"
  1758. "movq %%mm2, (%%ebx, %1, 2) \n\t"
  1759. : : "r" (src), "r" (stride)
  1760. : "%eax", "%ebx"
  1761. );
  1762. #else // MMX without MMX2
  1763. asm volatile(
  1764. "leal (%0, %1), %%eax \n\t"
  1765. "leal (%%eax, %1, 4), %%ebx \n\t"
  1766. // 0 1 2 3 4 5 6 7 8 9
  1767. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1768. "pxor %%mm7, %%mm7 \n\t"
  1769. #define MEDIAN(a,b,c)\
  1770. "movq " #a ", %%mm0 \n\t"\
  1771. "movq " #b ", %%mm2 \n\t"\
  1772. "movq " #c ", %%mm1 \n\t"\
  1773. "movq %%mm0, %%mm3 \n\t"\
  1774. "movq %%mm1, %%mm4 \n\t"\
  1775. "movq %%mm2, %%mm5 \n\t"\
  1776. "psubusb %%mm1, %%mm3 \n\t"\
  1777. "psubusb %%mm2, %%mm4 \n\t"\
  1778. "psubusb %%mm0, %%mm5 \n\t"\
  1779. "pcmpeqb %%mm7, %%mm3 \n\t"\
  1780. "pcmpeqb %%mm7, %%mm4 \n\t"\
  1781. "pcmpeqb %%mm7, %%mm5 \n\t"\
  1782. "movq %%mm3, %%mm6 \n\t"\
  1783. "pxor %%mm4, %%mm3 \n\t"\
  1784. "pxor %%mm5, %%mm4 \n\t"\
  1785. "pxor %%mm6, %%mm5 \n\t"\
  1786. "por %%mm3, %%mm1 \n\t"\
  1787. "por %%mm4, %%mm2 \n\t"\
  1788. "por %%mm5, %%mm0 \n\t"\
  1789. "pand %%mm2, %%mm0 \n\t"\
  1790. "pand %%mm1, %%mm0 \n\t"\
  1791. "movq %%mm0, " #b " \n\t"
  1792. MEDIAN((%0), (%%eax), (%%eax, %1))
  1793. MEDIAN((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4))
  1794. MEDIAN((%0, %1, 4), (%%ebx), (%%ebx, %1))
  1795. MEDIAN((%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8))
  1796. : : "r" (src), "r" (stride)
  1797. : "%eax", "%ebx"
  1798. );
  1799. #endif // MMX
  1800. #else
  1801. //FIXME
  1802. int x;
  1803. src+= 4*stride;
  1804. for(x=0; x<8; x++)
  1805. {
  1806. src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
  1807. src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
  1808. src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
  1809. src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
  1810. src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
  1811. src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
  1812. src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
  1813. src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
  1814. src++;
  1815. }
  1816. #endif
  1817. }
#ifdef HAVE_MMX
/**
 * transposes and shift the given 8x8 Block into dst1 and dst2
 * Reads an 8x8 block from src (pitch srcStride) and writes its transpose
 * into two temporary buffers with a 16-byte row pitch:
 *   dst1 at byte offsets 128..192 (rows 8-12) and dst2 at offsets 48..112
 *   (rows 3-7); the first asm half fills columns 0-3 of each output row,
 *   the second half columns 4-7.
 * NOTE(review): two transposed lines are stored into BOTH buffers
 * (176(%2)/48(%3) and 192(%2)/64(%3)) -- presumably the overlap required
 * because the vertical filters shift the image by one line; confirm with
 * the caller before changing.
 */
static inline void transpose1(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride)
{
	asm(
		"leal (%0, %1), %%eax				\n\t"
		"leal (%%eax, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1

		// 4x8 -> 8x4 transpose of source rows 0-3 via byte/word unpacks
		"movq (%0), %%mm0				\n\t" // 12345678
		"movq (%%eax), %%mm1				\n\t" // abcdefgh
		"movq %%mm0, %%mm2				\n\t" // 12345678
		"punpcklbw %%mm1, %%mm0				\n\t" // 1a2b3c4d
		"punpckhbw %%mm1, %%mm2				\n\t" // 5e6f7g8h

		"movq (%%eax, %1), %%mm1			\n\t"
		"movq (%%eax, %1, 2), %%mm3			\n\t"
		"movq %%mm1, %%mm4				\n\t"
		"punpcklbw %%mm3, %%mm1				\n\t"
		"punpckhbw %%mm3, %%mm4				\n\t"

		"movq %%mm0, %%mm3				\n\t"
		"punpcklwd %%mm1, %%mm0				\n\t"
		"punpckhwd %%mm1, %%mm3				\n\t"
		"movq %%mm2, %%mm1				\n\t"
		"punpcklwd %%mm4, %%mm2				\n\t"
		"punpckhwd %%mm4, %%mm1				\n\t"

		// scatter the 4-byte column groups (pitch 16)
		"movd %%mm0, 128(%2)				\n\t"
		"psrlq $32, %%mm0				\n\t"
		"movd %%mm0, 144(%2)				\n\t"
		"movd %%mm3, 160(%2)				\n\t"
		"psrlq $32, %%mm3				\n\t"
		"movd %%mm3, 176(%2)				\n\t"
		"movd %%mm3, 48(%3)				\n\t"
		"movd %%mm2, 192(%2)				\n\t"
		"movd %%mm2, 64(%3)				\n\t"
		"psrlq $32, %%mm2				\n\t"
		"movd %%mm2, 80(%3)				\n\t"
		"movd %%mm1, 96(%3)				\n\t"
		"psrlq $32, %%mm1				\n\t"
		"movd %%mm1, 112(%3)				\n\t"

		// same transpose for source rows 4-7 -> columns 4-7 (offset +4)
		"movq (%0, %1, 4), %%mm0			\n\t" // 12345678
		"movq (%%ebx), %%mm1				\n\t" // abcdefgh
		"movq %%mm0, %%mm2				\n\t" // 12345678
		"punpcklbw %%mm1, %%mm0				\n\t" // 1a2b3c4d
		"punpckhbw %%mm1, %%mm2				\n\t" // 5e6f7g8h

		"movq (%%ebx, %1), %%mm1			\n\t"
		"movq (%%ebx, %1, 2), %%mm3			\n\t"
		"movq %%mm1, %%mm4				\n\t"
		"punpcklbw %%mm3, %%mm1				\n\t"
		"punpckhbw %%mm3, %%mm4				\n\t"

		"movq %%mm0, %%mm3				\n\t"
		"punpcklwd %%mm1, %%mm0				\n\t"
		"punpckhwd %%mm1, %%mm3				\n\t"
		"movq %%mm2, %%mm1				\n\t"
		"punpcklwd %%mm4, %%mm2				\n\t"
		"punpckhwd %%mm4, %%mm1				\n\t"

		"movd %%mm0, 132(%2)				\n\t"
		"psrlq $32, %%mm0				\n\t"
		"movd %%mm0, 148(%2)				\n\t"
		"movd %%mm3, 164(%2)				\n\t"
		"psrlq $32, %%mm3				\n\t"
		"movd %%mm3, 180(%2)				\n\t"
		"movd %%mm3, 52(%3)				\n\t"
		"movd %%mm2, 196(%2)				\n\t"
		"movd %%mm2, 68(%3)				\n\t"
		"psrlq $32, %%mm2				\n\t"
		"movd %%mm2, 84(%3)				\n\t"
		"movd %%mm1, 100(%3)				\n\t"
		"psrlq $32, %%mm1				\n\t"
		"movd %%mm1, 116(%3)				\n\t"

	:: "r" (src), "r" (srcStride), "r" (dst1), "r" (dst2)
	: "%eax", "%ebx"
	);
}
/**
 * transposes the given 8x8 block
 * Reads the 8 source rows from a temporary buffer with a fixed 16-byte pitch
 * (src offsets 0,16,...,112) and writes the transposed 8x8 block to dst
 * using dstStride. Counterpart of transpose1().
 */
static inline void transpose2(uint8_t *dst, int dstStride, uint8_t *src)
{
	asm(
		"leal (%0, %1), %%eax				\n\t"
		"leal (%%eax, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1

		// transpose source rows 0-3 -> low 4 bytes of each dst row
		"movq (%2), %%mm0				\n\t" // 12345678
		"movq 16(%2), %%mm1				\n\t" // abcdefgh
		"movq %%mm0, %%mm2				\n\t" // 12345678
		"punpcklbw %%mm1, %%mm0				\n\t" // 1a2b3c4d
		"punpckhbw %%mm1, %%mm2				\n\t" // 5e6f7g8h

		"movq 32(%2), %%mm1				\n\t"
		"movq 48(%2), %%mm3				\n\t"
		"movq %%mm1, %%mm4				\n\t"
		"punpcklbw %%mm3, %%mm1				\n\t"
		"punpckhbw %%mm3, %%mm4				\n\t"

		"movq %%mm0, %%mm3				\n\t"
		"punpcklwd %%mm1, %%mm0				\n\t"
		"punpckhwd %%mm1, %%mm3				\n\t"
		"movq %%mm2, %%mm1				\n\t"
		"punpcklwd %%mm4, %%mm2				\n\t"
		"punpckhwd %%mm4, %%mm1				\n\t"

		"movd %%mm0, (%0)				\n\t"
		"psrlq $32, %%mm0				\n\t"
		"movd %%mm0, (%%eax)				\n\t"
		"movd %%mm3, (%%eax, %1)			\n\t"
		"psrlq $32, %%mm3				\n\t"
		"movd %%mm3, (%%eax, %1, 2)			\n\t"
		"movd %%mm2, (%0, %1, 4)			\n\t"
		"psrlq $32, %%mm2				\n\t"
		"movd %%mm2, (%%ebx)				\n\t"
		"movd %%mm1, (%%ebx, %1)			\n\t"
		"psrlq $32, %%mm1				\n\t"
		"movd %%mm1, (%%ebx, %1, 2)			\n\t"

		// transpose source rows 4-7 -> high 4 bytes of each dst row (offset +4)
		"movq 64(%2), %%mm0				\n\t" // 12345678
		"movq 80(%2), %%mm1				\n\t" // abcdefgh
		"movq %%mm0, %%mm2				\n\t" // 12345678
		"punpcklbw %%mm1, %%mm0				\n\t" // 1a2b3c4d
		"punpckhbw %%mm1, %%mm2				\n\t" // 5e6f7g8h

		"movq 96(%2), %%mm1				\n\t"
		"movq 112(%2), %%mm3				\n\t"
		"movq %%mm1, %%mm4				\n\t"
		"punpcklbw %%mm3, %%mm1				\n\t"
		"punpckhbw %%mm3, %%mm4				\n\t"

		"movq %%mm0, %%mm3				\n\t"
		"punpcklwd %%mm1, %%mm0				\n\t"
		"punpckhwd %%mm1, %%mm3				\n\t"
		"movq %%mm2, %%mm1				\n\t"
		"punpcklwd %%mm4, %%mm2				\n\t"
		"punpckhwd %%mm4, %%mm1				\n\t"

		"movd %%mm0, 4(%0)				\n\t"
		"psrlq $32, %%mm0				\n\t"
		"movd %%mm0, 4(%%eax)				\n\t"
		"movd %%mm3, 4(%%eax, %1)			\n\t"
		"psrlq $32, %%mm3				\n\t"
		"movd %%mm3, 4(%%eax, %1, 2)			\n\t"
		"movd %%mm2, 4(%0, %1, 4)			\n\t"
		"psrlq $32, %%mm2				\n\t"
		"movd %%mm2, 4(%%ebx)				\n\t"
		"movd %%mm1, 4(%%ebx, %1)			\n\t"
		"psrlq $32, %%mm1				\n\t"
		"movd %%mm1, 4(%%ebx, %1, 2)			\n\t"

	:: "r" (dst), "r" (dstStride), "r" (src)
	: "%eax", "%ebx"
	);
}
#endif
//static int test=0;

/**
 * Temporal noise reducer for one 8x8 block.
 * Computes a difference score between the current block (src) and the block
 * kept from previous frames (tempBlured), smoothes that score with the
 * neighbouring blocks' past scores (tempBluredPast), and then -- depending on
 * the three thresholds -- either copies src into tempBlured (scene change),
 * or mixes src and tempBlured with 1/2, 1/4 or 1/8 new-frame weight and
 * writes the result to BOTH buffers.
 *
 * src            current 8x8 block, read & written
 * stride         line stride of src and tempBlured
 * tempBlured     temporally blurred block from previous frames, read & written
 * tempBluredPast per-block score history; offsets -1/+1 and -256/+256 entries
 *                are read (the asm uses -4/+4 and -1024/+1024 BYTES, which is
 *                the same for uint32_t), so it is presumably one entry per
 *                8x8 block with a row pitch of 256 -- TODO confirm at caller
 * maxNoise       3 ascending thresholds; NOTE(review): only the C path uses
 *                this parameter -- the MMX2/3DNow asm reads the global
 *                maxTmpNoise instead; verify the caller keeps them in sync.
 *
 * NOTE(review): the asm stores the SMOOTHED score into *tempBluredPast while
 * the C path stores the raw SSD -- confirm which is intended.
 */
static void inline tempNoiseReducer(uint8_t *src, int stride,
	uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
{
#define FAST_L2_DIFF
//#define L1_DIFF //u should change the thresholds too if u try that one
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	asm volatile(
		"leal (%2, %2, 2), %%eax			\n\t" // 3*stride
		"leal (%2, %2, 4), %%ebx			\n\t" // 5*stride
		"leal (%%ebx, %2, 2), %%ecx			\n\t" // 7*stride
//	0	1	2	3	4	5	6	7	8	9
//	%x	%x+%2	%x+2%2	%x+eax	%x+4%2	%x+ebx	%x+2eax	%x+ecx	%x+8%2
//FIXME reorder?
#ifdef L1_DIFF //needs mmx2
		// sum of absolute differences over the 8 lines -> mm0
		"movq (%0), %%mm0				\n\t" // L0
		"psadbw (%1), %%mm0				\n\t" // |L0-R0|
		"movq (%0, %2), %%mm1				\n\t" // L1
		"psadbw (%1, %2), %%mm1				\n\t" // |L1-R1|
		"movq (%0, %2, 2), %%mm2			\n\t" // L2
		"psadbw (%1, %2, 2), %%mm2			\n\t" // |L2-R2|
		"movq (%0, %%eax), %%mm3			\n\t" // L3
		"psadbw (%1, %%eax), %%mm3			\n\t" // |L3-R3|

		"movq (%0, %2, 4), %%mm4			\n\t" // L4
		"paddw %%mm1, %%mm0				\n\t"
		"psadbw (%1, %2, 4), %%mm4			\n\t" // |L4-R4|
		"movq (%0, %%ebx), %%mm5			\n\t" // L5
		"paddw %%mm2, %%mm0				\n\t"
		"psadbw (%1, %%ebx), %%mm5			\n\t" // |L5-R5|
		"movq (%0, %%eax, 2), %%mm6			\n\t" // L6
		"paddw %%mm3, %%mm0				\n\t"
		"psadbw (%1, %%eax, 2), %%mm6			\n\t" // |L6-R6|
		"movq (%0, %%ecx), %%mm7			\n\t" // L7
		"paddw %%mm4, %%mm0				\n\t"
		"psadbw (%1, %%ecx), %%mm7			\n\t" // |L7-R7|
		"paddw %%mm5, %%mm6				\n\t"
		"paddw %%mm7, %%mm6				\n\t"
		"paddw %%mm6, %%mm0				\n\t"
#elif defined (FAST_L2_DIFF)
		// approximate sum of squared differences; b80 is a global constant
		// defined elsewhere in this file
		"pcmpeqb %%mm7, %%mm7				\n\t"
		"movq b80, %%mm6				\n\t"
		"pxor %%mm0, %%mm0				\n\t"
#define L2_DIFF_CORE(a, b)\
		"movq " #a ", %%mm5				\n\t"\
		"movq " #b ", %%mm2				\n\t"\
		"pxor %%mm7, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm5)\
		"paddb %%mm6, %%mm5				\n\t"\
		"movq %%mm5, %%mm2				\n\t"\
		"psllw $8, %%mm5				\n\t"\
		"pmaddwd %%mm5, %%mm5				\n\t"\
		"pmaddwd %%mm2, %%mm2				\n\t"\
		"paddd %%mm2, %%mm5				\n\t"\
		"psrld $14, %%mm5				\n\t"\
		"paddd %%mm5, %%mm0				\n\t"

L2_DIFF_CORE((%0), (%1))
L2_DIFF_CORE((%0, %2), (%1, %2))
L2_DIFF_CORE((%0, %2, 2), (%1, %2, 2))
L2_DIFF_CORE((%0, %%eax), (%1, %%eax))
L2_DIFF_CORE((%0, %2, 4), (%1, %2, 4))
L2_DIFF_CORE((%0, %%ebx), (%1, %%ebx))
L2_DIFF_CORE((%0, %%eax,2), (%1, %%eax,2))
L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))

#else
		// exact sum of squared differences via 16bit widening
		"pxor %%mm7, %%mm7				\n\t"
		"pxor %%mm0, %%mm0				\n\t"
#define L2_DIFF_CORE(a, b)\
		"movq " #a ", %%mm5				\n\t"\
		"movq " #b ", %%mm2				\n\t"\
		"movq %%mm5, %%mm1				\n\t"\
		"movq %%mm2, %%mm3				\n\t"\
		"punpcklbw %%mm7, %%mm5				\n\t"\
		"punpckhbw %%mm7, %%mm1				\n\t"\
		"punpcklbw %%mm7, %%mm2				\n\t"\
		"punpckhbw %%mm7, %%mm3				\n\t"\
		"psubw %%mm2, %%mm5				\n\t"\
		"psubw %%mm3, %%mm1				\n\t"\
		"pmaddwd %%mm5, %%mm5				\n\t"\
		"pmaddwd %%mm1, %%mm1				\n\t"\
		"paddd %%mm1, %%mm5				\n\t"\
		"paddd %%mm5, %%mm0				\n\t"

L2_DIFF_CORE((%0), (%1))
L2_DIFF_CORE((%0, %2), (%1, %2))
L2_DIFF_CORE((%0, %2, 2), (%1, %2, 2))
L2_DIFF_CORE((%0, %%eax), (%1, %%eax))
L2_DIFF_CORE((%0, %2, 4), (%1, %2, 4))
L2_DIFF_CORE((%0, %%ebx), (%1, %%ebx))
L2_DIFF_CORE((%0, %%eax,2), (%1, %%eax,2))
L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))

#endif
		// horizontal sum of the two dwords -> ecx, then smooth:
		// score= (4*d + past[-1] + past[+1] + past[-256] + past[+256] + 4)/8
		// (byte offsets -4/+4/-1024/+1024 on the uint32_t array)
		"movq %%mm0, %%mm4				\n\t"
		"psrlq $32, %%mm0				\n\t"
		"paddd %%mm0, %%mm4				\n\t"
		"movd %%mm4, %%ecx				\n\t"
		"shll $2, %%ecx					\n\t"
		"movl %3, %%ebx					\n\t"
		"addl -4(%%ebx), %%ecx				\n\t"
		"addl 4(%%ebx), %%ecx				\n\t"
		"addl -1024(%%ebx), %%ecx			\n\t"
		"addl $4, %%ecx					\n\t"
		"addl 1024(%%ebx), %%ecx			\n\t"
		"shrl $3, %%ecx					\n\t"
		"movl %%ecx, (%%ebx)				\n\t"
		"leal (%%eax, %2, 2), %%ebx			\n\t" // 5*stride
//		"movl %3, %%ecx					\n\t"
//		"movl %%ecx, test				\n\t"
//		"jmp 4f \n\t"
		// dispatch on the smoothed score against the global thresholds
		"cmpl 4+maxTmpNoise, %%ecx			\n\t"
		" jb 2f						\n\t"
		"cmpl 8+maxTmpNoise, %%ecx			\n\t"
		" jb 1f						\n\t"

		// score >= maxTmpNoise[2]: copy src into tempBlured unchanged
		"leal (%%ebx, %2, 2), %%ecx			\n\t" // 7*stride
		"movq (%0), %%mm0				\n\t" // L0
		"movq (%0, %2), %%mm1				\n\t" // L1
		"movq (%0, %2, 2), %%mm2			\n\t" // L2
		"movq (%0, %%eax), %%mm3			\n\t" // L3
		"movq (%0, %2, 4), %%mm4			\n\t" // L4
		"movq (%0, %%ebx), %%mm5			\n\t" // L5
		"movq (%0, %%eax, 2), %%mm6			\n\t" // L6
		"movq (%0, %%ecx), %%mm7			\n\t" // L7
		"movq %%mm0, (%1)				\n\t" // L0
		"movq %%mm1, (%1, %2)				\n\t" // L1
		"movq %%mm2, (%1, %2, 2)			\n\t" // L2
		"movq %%mm3, (%1, %%eax)			\n\t" // L3
		"movq %%mm4, (%1, %2, 4)			\n\t" // L4
		"movq %%mm5, (%1, %%ebx)			\n\t" // L5
		"movq %%mm6, (%1, %%eax, 2)			\n\t" // L6
		"movq %%mm7, (%1, %%ecx)			\n\t" // L7
		"jmp 4f						\n\t"

		"1:						\n\t" // 1/2 new, 1/2 old: single pavgb per line
		"leal (%%ebx, %2, 2), %%ecx			\n\t" // 7*stride
		"movq (%0), %%mm0				\n\t" // L0
		"pavgb (%1), %%mm0				\n\t" // L0
		"movq (%0, %2), %%mm1				\n\t" // L1
		"pavgb (%1, %2), %%mm1				\n\t" // L1
		"movq (%0, %2, 2), %%mm2			\n\t" // L2
		"pavgb (%1, %2, 2), %%mm2			\n\t" // L2
		"movq (%0, %%eax), %%mm3			\n\t" // L3
		"pavgb (%1, %%eax), %%mm3			\n\t" // L3
		"movq (%0, %2, 4), %%mm4			\n\t" // L4
		"pavgb (%1, %2, 4), %%mm4			\n\t" // L4
		"movq (%0, %%ebx), %%mm5			\n\t" // L5
		"pavgb (%1, %%ebx), %%mm5			\n\t" // L5
		"movq (%0, %%eax, 2), %%mm6			\n\t" // L6
		"pavgb (%1, %%eax, 2), %%mm6			\n\t" // L6
		"movq (%0, %%ecx), %%mm7			\n\t" // L7
		"pavgb (%1, %%ecx), %%mm7			\n\t" // L7
		"movq %%mm0, (%1)				\n\t" // R0
		"movq %%mm1, (%1, %2)				\n\t" // R1
		"movq %%mm2, (%1, %2, 2)			\n\t" // R2
		"movq %%mm3, (%1, %%eax)			\n\t" // R3
		"movq %%mm4, (%1, %2, 4)			\n\t" // R4
		"movq %%mm5, (%1, %%ebx)			\n\t" // R5
		"movq %%mm6, (%1, %%eax, 2)			\n\t" // R6
		"movq %%mm7, (%1, %%ecx)			\n\t" // R7
		"movq %%mm0, (%0)				\n\t" // L0
		"movq %%mm1, (%0, %2)				\n\t" // L1
		"movq %%mm2, (%0, %2, 2)			\n\t" // L2
		"movq %%mm3, (%0, %%eax)			\n\t" // L3
		"movq %%mm4, (%0, %2, 4)			\n\t" // L4
		"movq %%mm5, (%0, %%ebx)			\n\t" // L5
		"movq %%mm6, (%0, %%eax, 2)			\n\t" // L6
		"movq %%mm7, (%0, %%ecx)			\n\t" // L7
		"jmp 4f						\n\t"

		"2:						\n\t"
		"cmpl maxTmpNoise, %%ecx			\n\t"
		" jb 3f						\n\t"

		// maxTmpNoise[0] <= score < maxTmpNoise[1]: 1/4 new, 3/4 old
		// (two PAVGB passes per line)
		"leal (%%ebx, %2, 2), %%ecx			\n\t" // 7*stride
		"movq (%0), %%mm0				\n\t" // L0
		"movq (%0, %2), %%mm1				\n\t" // L1
		"movq (%0, %2, 2), %%mm2			\n\t" // L2
		"movq (%0, %%eax), %%mm3			\n\t" // L3
		"movq (%1), %%mm4				\n\t" // R0
		"movq (%1, %2), %%mm5				\n\t" // R1
		"movq (%1, %2, 2), %%mm6			\n\t" // R2
		"movq (%1, %%eax), %%mm7			\n\t" // R3
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		"movq %%mm0, (%1)				\n\t" // R0
		"movq %%mm1, (%1, %2)				\n\t" // R1
		"movq %%mm2, (%1, %2, 2)			\n\t" // R2
		"movq %%mm3, (%1, %%eax)			\n\t" // R3
		"movq %%mm0, (%0)				\n\t" // L0
		"movq %%mm1, (%0, %2)				\n\t" // L1
		"movq %%mm2, (%0, %2, 2)			\n\t" // L2
		"movq %%mm3, (%0, %%eax)			\n\t" // L3

		"movq (%0, %2, 4), %%mm0			\n\t" // L4
		"movq (%0, %%ebx), %%mm1			\n\t" // L5
		"movq (%0, %%eax, 2), %%mm2			\n\t" // L6
		"movq (%0, %%ecx), %%mm3			\n\t" // L7
		"movq (%1, %2, 4), %%mm4			\n\t" // R4
		"movq (%1, %%ebx), %%mm5			\n\t" // R5
		"movq (%1, %%eax, 2), %%mm6			\n\t" // R6
		"movq (%1, %%ecx), %%mm7			\n\t" // R7
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		"movq %%mm0, (%1, %2, 4)			\n\t" // R4
		"movq %%mm1, (%1, %%ebx)			\n\t" // R5
		"movq %%mm2, (%1, %%eax, 2)			\n\t" // R6
		"movq %%mm3, (%1, %%ecx)			\n\t" // R7
		"movq %%mm0, (%0, %2, 4)			\n\t" // L4
		"movq %%mm1, (%0, %%ebx)			\n\t" // L5
		"movq %%mm2, (%0, %%eax, 2)			\n\t" // L6
		"movq %%mm3, (%0, %%ecx)			\n\t" // L7
		"jmp 4f						\n\t"

		"3:						\n\t" // score < maxTmpNoise[0]: 1/8 new, 7/8 old (three PAVGB passes)
		"leal (%%ebx, %2, 2), %%ecx			\n\t" // 7*stride
		"movq (%0), %%mm0				\n\t" // L0
		"movq (%0, %2), %%mm1				\n\t" // L1
		"movq (%0, %2, 2), %%mm2			\n\t" // L2
		"movq (%0, %%eax), %%mm3			\n\t" // L3
		"movq (%1), %%mm4				\n\t" // R0
		"movq (%1, %2), %%mm5				\n\t" // R1
		"movq (%1, %2, 2), %%mm6			\n\t" // R2
		"movq (%1, %%eax), %%mm7			\n\t" // R3
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		"movq %%mm0, (%1)				\n\t" // R0
		"movq %%mm1, (%1, %2)				\n\t" // R1
		"movq %%mm2, (%1, %2, 2)			\n\t" // R2
		"movq %%mm3, (%1, %%eax)			\n\t" // R3
		"movq %%mm0, (%0)				\n\t" // L0
		"movq %%mm1, (%0, %2)				\n\t" // L1
		"movq %%mm2, (%0, %2, 2)			\n\t" // L2
		"movq %%mm3, (%0, %%eax)			\n\t" // L3

		"movq (%0, %2, 4), %%mm0			\n\t" // L4
		"movq (%0, %%ebx), %%mm1			\n\t" // L5
		"movq (%0, %%eax, 2), %%mm2			\n\t" // L6
		"movq (%0, %%ecx), %%mm3			\n\t" // L7
		"movq (%1, %2, 4), %%mm4			\n\t" // R4
		"movq (%1, %%ebx), %%mm5			\n\t" // R5
		"movq (%1, %%eax, 2), %%mm6			\n\t" // R6
		"movq (%1, %%ecx), %%mm7			\n\t" // R7
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		PAVGB(%%mm4, %%mm0)
		PAVGB(%%mm5, %%mm1)
		PAVGB(%%mm6, %%mm2)
		PAVGB(%%mm7, %%mm3)
		"movq %%mm0, (%1, %2, 4)			\n\t" // R4
		"movq %%mm1, (%1, %%ebx)			\n\t" // R5
		"movq %%mm2, (%1, %%eax, 2)			\n\t" // R6
		"movq %%mm3, (%1, %%ecx)			\n\t" // R7
		"movq %%mm0, (%0, %2, 4)			\n\t" // L4
		"movq %%mm1, (%0, %%ebx)			\n\t" // L5
		"movq %%mm2, (%0, %%eax, 2)			\n\t" // L6
		"movq %%mm3, (%0, %%ecx)			\n\t" // L7

		"4:						\n\t"

		:: "r" (src), "r" (tempBlured), "r"(stride), "m" (tempBluredPast)
		: "%eax", "%ebx", "%ecx", "memory"
	);
//printf("%d\n", test);
#else
	int y;
	int d=0;
	int sysd=0; // accumulated signed difference; computed but never used below
	int i;

	// raw sum of squared differences between src and tempBlured
	for(y=0; y<8; y++)
	{
		int x;
		for(x=0; x<8; x++)
		{
			int ref= tempBlured[ x + y*stride ];
			int cur= src[ x + y*stride ];
			int d1=ref - cur;
//			if(x==0 || x==7) d1+= d1>>1;
//			if(y==0 || y==7) d1+= d1>>1;
//			d+= ABS(d1);
			d+= d1*d1;
			sysd+= d1;
		}
	}
	i=d;
	// smooth the score with the 4 neighbouring blocks' past scores
	d= (
		4*d
		+(*(tempBluredPast-256))
		+(*(tempBluredPast-1))+ (*(tempBluredPast+1))
		+(*(tempBluredPast+256))
		+4)>>3;
	*tempBluredPast=i; // NOTE(review): stores the raw SSD, unlike the asm path
//	((*tempBluredPast)*3 + d + 2)>>2;

//printf("%d %d %d\n", maxNoise[0], maxNoise[1], maxNoise[2]);
/*
Switch between
 1  0  0  0  0  0  0  (0)
64 32 16  8  4  2  1  (1)
64 48 36 27 20 15 11  (33) (approx)
64 56 49 43 37 33 29  (200) (approx)
*/
	if(d > maxNoise[1])
	{
		if(d < maxNoise[2])
		{
			// moderate change: average old and new (1/2 weight)
			for(y=0; y<8; y++)
			{
				int x;
				for(x=0; x<8; x++)
				{
					int ref= tempBlured[ x + y*stride ];
					int cur= src[ x + y*stride ];
					tempBlured[ x + y*stride ]=
					src[ x + y*stride ]=
						(ref + cur + 1)>>1;
				}
			}
		}
		else
		{
			// big change (scene cut): restart the blur from src
			for(y=0; y<8; y++)
			{
				int x;
				for(x=0; x<8; x++)
				{
					tempBlured[ x + y*stride ]= src[ x + y*stride ];
				}
			}
		}
	}
	else
	{
		if(d < maxNoise[0])
		{
			// almost static: strong temporal blur (1/8 new)
			for(y=0; y<8; y++)
			{
				int x;
				for(x=0; x<8; x++)
				{
					int ref= tempBlured[ x + y*stride ];
					int cur= src[ x + y*stride ];
					tempBlured[ x + y*stride ]=
					src[ x + y*stride ]=
						(ref*7 + cur + 4)>>3;
				}
			}
		}
		else
		{
			// small change: medium temporal blur (1/4 new)
			for(y=0; y<8; y++)
			{
				int x;
				for(x=0; x<8; x++)
				{
					int ref= tempBlured[ x + y*stride ];
					int cur= src[ x + y*stride ];
					tempBlured[ x + y*stride ]=
					src[ x + y*stride ]=
						(ref*3 + cur + 2)>>2;
				}
			}
		}
	}
#endif
}
#ifdef HAVE_ODIVX_POSTPROCESS
#include "../opendivx/postprocess.h"
// if non-0, postprocess()/postprocess2() delegate to odivx_postprocess()
int use_old_pp=0;
#endif

// main per-plane worker, defined later in this file
static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
	QP_STORE_T QPs[], int QPStride, int isColor, struct PPMode *ppMode);
  2351. /* -pp Command line Help
  2352. NOTE/FIXME: put this at an appropriate place (--help, html docs, man mplayer)?
  2353. -pp <filterName>[:<option>[:<option>...]][,[-]<filterName>[:<option>...]]...
  2354. long form example:
  2355. -pp vdeblock:autoq,hdeblock:autoq,linblenddeint -pp default,-vdeblock
  2356. short form example:
  2357. -pp vb:a,hb:a,lb -pp de,-vb
  2358. more examples:
  2359. -pp tn:64:128:256
  2360. Filters Options
  2361. short long name short long option Description
*	*	a	autoq	cpu power dependent enabler
c	chrom		chrominance filtering enabled
y	nochrom		chrominance filtering disabled
  2365. hb hdeblock horizontal deblocking filter
  2366. vb vdeblock vertical deblocking filter
  2367. vr rkvdeblock
  2368. h1 x1hdeblock Experimental horizontal deblock filter 1
  2369. v1 x1vdeblock Experimental vertical deblock filter 1
  2370. dr dering not implemented yet
  2371. al autolevels automatic brightness / contrast fixer
  2372. f fullyrange stretch luminance range to (0..255)
  2373. lb linblenddeint linear blend deinterlacer
  2374. li linipoldeint linear interpolating deinterlacer
  2375. ci cubicipoldeint cubic interpolating deinterlacer
  2376. md mediandeint median deinterlacer
  2377. de default hdeblock:a,vdeblock:a,dering:a,autolevels
  2378. fa fast x1hdeblock:a,x1vdeblock:a,dering:a,autolevels
  2379. tn tmpnoise (3 Thresholds) Temporal Noise Reducer
  2380. */
  2381. /**
  2382. * returns a PPMode struct which will have a non 0 error variable if an error occured
  2383. * name is the string after "-pp" on the command line
  2384. * quality is a number from 0 to GET_PP_QUALITY_MAX
  2385. */
  2386. struct PPMode getPPModeByNameAndQuality(char *name, int quality)
  2387. {
  2388. char temp[GET_MODE_BUFFER_SIZE];
  2389. char *p= temp;
  2390. char *filterDelimiters= ",";
  2391. char *optionDelimiters= ":";
  2392. struct PPMode ppMode= {0,0,0,0,0,0,{150,200,400}};
  2393. char *filterToken;
  2394. strncpy(temp, name, GET_MODE_BUFFER_SIZE);
  2395. printf("%s\n", name);
  2396. for(;;){
  2397. char *filterName;
  2398. int q= 1000000; //GET_PP_QUALITY_MAX;
  2399. int chrom=-1;
  2400. char *option;
  2401. char *options[OPTIONS_ARRAY_SIZE];
  2402. int i;
  2403. int filterNameOk=0;
  2404. int numOfUnknownOptions=0;
  2405. int enable=1; //does the user want us to enabled or disabled the filter
  2406. filterToken= strtok(p, filterDelimiters);
  2407. if(filterToken == NULL) break;
  2408. p+= strlen(filterToken) + 1; // p points to next filterToken
  2409. filterName= strtok(filterToken, optionDelimiters);
  2410. printf("%s::%s\n", filterToken, filterName);
  2411. if(*filterName == '-')
  2412. {
  2413. enable=0;
  2414. filterName++;
  2415. }
  2416. for(;;){ //for all options
  2417. option= strtok(NULL, optionDelimiters);
  2418. if(option == NULL) break;
  2419. printf("%s\n", option);
  2420. if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
  2421. else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
  2422. else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
  2423. else
  2424. {
  2425. options[numOfUnknownOptions] = option;
  2426. numOfUnknownOptions++;
  2427. }
  2428. if(numOfUnknownOptions >= OPTIONS_ARRAY_SIZE-1) break;
  2429. }
  2430. options[numOfUnknownOptions] = NULL;
  2431. /* replace stuff from the replace Table */
  2432. for(i=0; replaceTable[2*i]!=NULL; i++)
  2433. {
  2434. if(!strcmp(replaceTable[2*i], filterName))
  2435. {
  2436. int newlen= strlen(replaceTable[2*i + 1]);
  2437. int plen;
  2438. int spaceLeft;
  2439. if(p==NULL) p= temp, *p=0; //last filter
  2440. else p--, *p=','; //not last filter
  2441. plen= strlen(p);
  2442. spaceLeft= (int)p - (int)temp + plen;
  2443. if(spaceLeft + newlen >= GET_MODE_BUFFER_SIZE)
  2444. {
  2445. ppMode.error++;
  2446. break;
  2447. }
  2448. memmove(p + newlen, p, plen+1);
  2449. memcpy(p, replaceTable[2*i + 1], newlen);
  2450. filterNameOk=1;
  2451. }
  2452. }
  2453. for(i=0; filters[i].shortName!=NULL; i++)
  2454. {
  2455. // printf("Compareing %s, %s, %s\n", filters[i].shortName,filters[i].longName, filterName);
  2456. if( !strcmp(filters[i].longName, filterName)
  2457. || !strcmp(filters[i].shortName, filterName))
  2458. {
  2459. ppMode.lumMode &= ~filters[i].mask;
  2460. ppMode.chromMode &= ~filters[i].mask;
  2461. filterNameOk=1;
  2462. if(!enable) break; // user wants to disable it
  2463. if(q >= filters[i].minLumQuality)
  2464. ppMode.lumMode|= filters[i].mask;
  2465. if(chrom==1 || (chrom==-1 && filters[i].chromDefault))
  2466. if(q >= filters[i].minChromQuality)
  2467. ppMode.chromMode|= filters[i].mask;
  2468. if(filters[i].mask == LEVEL_FIX)
  2469. {
  2470. int o;
  2471. ppMode.minAllowedY= 16;
  2472. ppMode.maxAllowedY= 234;
  2473. for(o=0; options[o]!=NULL; o++)
  2474. if( !strcmp(options[o],"fullyrange")
  2475. ||!strcmp(options[o],"f"))
  2476. {
  2477. ppMode.minAllowedY= 0;
  2478. ppMode.maxAllowedY= 255;
  2479. numOfUnknownOptions--;
  2480. }
  2481. }
  2482. else if(filters[i].mask == TEMP_NOISE_FILTER)
  2483. {
  2484. int o;
  2485. int numOfNoises=0;
  2486. ppMode.maxTmpNoise[0]= 150;
  2487. ppMode.maxTmpNoise[1]= 200;
  2488. ppMode.maxTmpNoise[2]= 400;
  2489. for(o=0; options[o]!=NULL; o++)
  2490. {
  2491. char *tail;
  2492. ppMode.maxTmpNoise[numOfNoises]=
  2493. strtol(options[o], &tail, 0);
  2494. if(tail!=options[o])
  2495. {
  2496. numOfNoises++;
  2497. numOfUnknownOptions--;
  2498. if(numOfNoises >= 3) break;
  2499. }
  2500. }
  2501. }
  2502. }
  2503. }
  2504. if(!filterNameOk) ppMode.error++;
  2505. ppMode.error += numOfUnknownOptions;
  2506. }
  2507. #ifdef HAVE_ODIVX_POSTPROCESS
  2508. if(ppMode.lumMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_H;
  2509. if(ppMode.lumMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_V;
  2510. if(ppMode.chromMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_H;
  2511. if(ppMode.chromMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_V;
  2512. if(ppMode.lumMode & DERING) ppMode.oldMode |= PP_DERING_Y;
  2513. if(ppMode.chromMode & DERING) ppMode.oldMode |= PP_DERING_C;
  2514. #endif
  2515. return ppMode;
  2516. }
/**
 * Obsolete, don't use it, use postprocess2() instead.
 * Builds a PPMode from the old integer mode bitfield and runs postProcess()
 * over the luma plane and both (half sized) chroma planes.
 */
void  postprocess(unsigned char * src[], int src_stride,
                 unsigned char * dst[], int dst_stride,
                 int horizontal_size,   int vertical_size,
                 QP_STORE_T *QP_store,  int QP_stride,
					  int mode)
{
	struct PPMode ppMode;
	static QP_STORE_T zeroArray[2048/8]; // all-0 QPs, used when the caller supplies none

/*
	static int qual=0;

	ppMode= getPPModeByNameAndQuality("fast,default,-hdeblock,-vdeblock,tmpnoise:150:200:300", qual);
	printf("OK\n");
	qual++;
	qual%=7;
	printf("\n%X %X %X %X :%d: %d %d %d\n", ppMode.lumMode, ppMode.chromMode, ppMode.oldMode, ppMode.error,
		qual, ppMode.maxTmpNoise[0], ppMode.maxTmpNoise[1], ppMode.maxTmpNoise[2]);
	postprocess2(src, src_stride, dst, dst_stride,
		horizontal_size, vertical_size, QP_store, QP_stride, &ppMode);

	return;
*/

	if(QP_store==NULL)
	{
		QP_store= zeroArray;
		QP_stride= 0;
	}

	ppMode.lumMode= mode;
	// chroma mode: bits 4-7 of the low byte are shifted down into bits 0-3,
	// the upper bits are kept as they are
	mode= ((mode&0xFF)>>4) | (mode&0xFFFFFF00);
	ppMode.chromMode= mode;
	ppMode.maxTmpNoise[0]= 700;
	ppMode.maxTmpNoise[1]= 1500;
	ppMode.maxTmpNoise[2]= 3000;

#ifdef HAVE_ODIVX_POSTPROCESS
// Note: I could make this shit outside of this file, but it would mean one
// more function call...
	if(use_old_pp){
	    odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,mode);
	    return;
	}
#endif

	postProcess(src[0], src_stride, dst[0], dst_stride,
		horizontal_size, vertical_size, QP_store, QP_stride, 0, &ppMode);

	horizontal_size	>>= 1;
	vertical_size	>>= 1;
	src_stride	>>= 1;
	dst_stride	>>= 1;

	if(1) // debug toggle: the disabled else branch fills the chroma planes with a constant 128
	{
		postProcess(src[1], src_stride, dst[1], dst_stride,
			horizontal_size, vertical_size, QP_store, QP_stride, 1, &ppMode);
		postProcess(src[2], src_stride, dst[2], dst_stride,
			horizontal_size, vertical_size, QP_store, QP_stride, 2, &ppMode);
	}
	else
	{
		memset(dst[1], 128, dst_stride*vertical_size);
		memset(dst[2], 128, dst_stride*vertical_size);

//		memcpy(dst[1], src[1], src_stride*horizontal_size);
//		memcpy(dst[2], src[2], src_stride*horizontal_size);
	}
}
  2580. void postprocess2(unsigned char * src[], int src_stride,
  2581. unsigned char * dst[], int dst_stride,
  2582. int horizontal_size, int vertical_size,
  2583. QP_STORE_T *QP_store, int QP_stride,
  2584. struct PPMode *mode)
  2585. {
  2586. static QP_STORE_T zeroArray[2048/8];
  2587. if(QP_store==NULL)
  2588. {
  2589. QP_store= zeroArray;
  2590. QP_stride= 0;
  2591. }
  2592. #ifdef HAVE_ODIVX_POSTPROCESS
  2593. // Note: I could make this shit outside of this file, but it would mean one
  2594. // more function call...
  2595. if(use_old_pp){
  2596. odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,
  2597. mode->oldMode);
  2598. return;
  2599. }
  2600. #endif
  2601. postProcess(src[0], src_stride, dst[0], dst_stride,
  2602. horizontal_size, vertical_size, QP_store, QP_stride, 0, mode);
  2603. horizontal_size >>= 1;
  2604. vertical_size >>= 1;
  2605. src_stride >>= 1;
  2606. dst_stride >>= 1;
  2607. postProcess(src[1], src_stride, dst[1], dst_stride,
  2608. horizontal_size, vertical_size, QP_store, QP_stride, 1, mode);
  2609. postProcess(src[2], src_stride, dst[2], dst_stride,
  2610. horizontal_size, vertical_size, QP_store, QP_stride, 2, mode);
  2611. }
  2612. /**
  2613. * gets the mode flags for a given quality (larger values mean slower but better postprocessing)
  2614. * 0 <= quality <= 6
  2615. */
  2616. int getPpModeForQuality(int quality){
  2617. int modes[1+GET_PP_QUALITY_MAX]= {
  2618. 0,
  2619. #if 1
  2620. // horizontal filters first
  2621. LUM_H_DEBLOCK,
  2622. LUM_H_DEBLOCK | LUM_V_DEBLOCK,
  2623. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2624. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2625. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING,
  2626. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING | CHROM_DERING
  2627. #else
  2628. // vertical filters first
  2629. LUM_V_DEBLOCK,
  2630. LUM_V_DEBLOCK | LUM_H_DEBLOCK,
  2631. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2632. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2633. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING,
  2634. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING | CHROM_DERING
  2635. #endif
  2636. };
  2637. #ifdef HAVE_ODIVX_POSTPROCESS
  2638. int odivx_modes[1+GET_PP_QUALITY_MAX]= {
  2639. 0,
  2640. PP_DEBLOCK_Y_H,
  2641. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V,
  2642. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H,
  2643. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V,
  2644. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y,
  2645. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y|PP_DERING_C
  2646. };
  2647. if(use_old_pp) return odivx_modes[quality];
  2648. #endif
  2649. return modes[quality];
  2650. }
/**
 * Copies a block from src to dst and fixes the blacklevel
 * numLines must be a multiple of 4
 * levelFix == 0 -> don't touch the brightness & contrast
 */
static inline void blockCopy(uint8_t dst[], int dstStride, uint8_t src[], int srcStride,
	int levelFix)
{
#ifndef HAVE_MMX
	int i;
#endif
	if(levelFix)
	{
#ifdef HAVE_MMX
	/* MMX path: copy 8 lines of 8 bytes while remapping every pixel with the
	   global packedYOffset / packedYScale pair:
	       out = sat8( ((in - offset) << 6) * scale >> 16 )
	   i.e. a fixed-point multiply by scale/1024 after subtracting the black
	   offset, saturated back to bytes by packuswb.
	   NOTE(review): depends on packedYOffset/packedYScale having been set by
	   the caller (postProcess does this per frame) — confirm before reuse. */
	asm volatile(
		"leal (%2,%2), %%eax	\n\t"	// eax= 2*srcStride
		"leal (%3,%3), %%ebx	\n\t"	// ebx= 2*dstStride
		"movq packedYOffset, %%mm2	\n\t"
		"movq packedYScale, %%mm3	\n\t"
		"pxor %%mm4, %%mm4	\n\t"	// mm4= 0, used for byte->word unpack

/* one expansion processes 2 source lines; it advances src (%0) itself,
   dst (%1) is advanced between expansions */
#define SCALED_CPY \
		"movq (%0), %%mm0	\n\t"\
		"movq (%0), %%mm5	\n\t"\
		"punpcklbw %%mm4, %%mm0 \n\t"\
		"punpckhbw %%mm4, %%mm5 \n\t"\
		"psubw %%mm2, %%mm0	\n\t"\
		"psubw %%mm2, %%mm5	\n\t"\
		"movq (%0,%2), %%mm1	\n\t"\
		"psllw $6, %%mm0	\n\t"\
		"psllw $6, %%mm5	\n\t"\
		"pmulhw %%mm3, %%mm0	\n\t"\
		"movq (%0,%2), %%mm6	\n\t"\
		"pmulhw %%mm3, %%mm5	\n\t"\
		"punpcklbw %%mm4, %%mm1 \n\t"\
		"punpckhbw %%mm4, %%mm6 \n\t"\
		"psubw %%mm2, %%mm1	\n\t"\
		"psubw %%mm2, %%mm6	\n\t"\
		"psllw $6, %%mm1	\n\t"\
		"psllw $6, %%mm6	\n\t"\
		"pmulhw %%mm3, %%mm1	\n\t"\
		"pmulhw %%mm3, %%mm6	\n\t"\
		"addl %%eax, %0	\n\t"\
		"packuswb %%mm5, %%mm0	\n\t"\
		"packuswb %%mm6, %%mm1	\n\t"\
		"movq %%mm0, (%1)	\n\t"\
		"movq %%mm1, (%1, %3)	\n\t"\

/* 4 expansions x 2 lines = 8 lines total */
SCALED_CPY
		"addl %%ebx, %1	\n\t"
SCALED_CPY
		"addl %%ebx, %1	\n\t"
SCALED_CPY
		"addl %%ebx, %1	\n\t"
SCALED_CPY

		: "+r"(src),
		  "+r"(dst)
		:"r" (srcStride),
		"r" (dstStride)
		: "%eax", "%ebx"
	);
#else
	/* portable fallback: plain 8-line copy (the level fix is skipped here;
	   NOTE(review): the C path does not apply the brightness/contrast
	   remapping the MMX path does — presumably accepted, confirm) */
	for(i=0; i<8; i++)
		memcpy(	&(dst[dstStride*i]),
			&(src[srcStride*i]), BLOCK_SIZE);
#endif
	}
	else
	{
#ifdef HAVE_MMX
	/* plain 8x8 byte copy; src/dst are pushed/popped because the operands
	   are read-only "r" constraints yet get advanced inside the asm */
	asm volatile(
		"pushl %0 \n\t"
		"pushl %1 \n\t"
		"leal (%2,%2), %%eax	\n\t"	// eax= 2*srcStride
		"leal (%3,%3), %%ebx	\n\t"	// ebx= 2*dstStride

/* copies 2 lines per expansion */
#define SIMPLE_CPY \
		"movq (%0), %%mm0	\n\t"\
		"movq (%0,%2), %%mm1	\n\t"\
		"movq %%mm0, (%1)	\n\t"\
		"movq %%mm1, (%1, %3)	\n\t"\

SIMPLE_CPY
		"addl %%eax, %0	\n\t"
		"addl %%ebx, %1	\n\t"
SIMPLE_CPY
		"addl %%eax, %0	\n\t"
		"addl %%ebx, %1	\n\t"
SIMPLE_CPY
		"addl %%eax, %0	\n\t"
		"addl %%ebx, %1	\n\t"
SIMPLE_CPY

		"popl %1 \n\t"
		"popl %0 \n\t"
		: : "r" (src),
		"r" (dst),
		"r" (srcStride),
		"r" (dstStride)
		: "%eax", "%ebx"
	);
#else
	/* portable fallback: straight 8-line copy */
	for(i=0; i<8; i++)
		memcpy(	&(dst[dstStride*i]),
			&(src[srcStride*i]), BLOCK_SIZE);
#endif
	}
}
  2754. /**
  2755. * Filters array of bytes (Y or U or V values)
  2756. */
/* Core per-plane filter driver: walks the plane in 8x8 blocks, copies each
   block (optionally level-fixed), runs the selected deinterlace / deblock /
   dering / temporal-noise filters, and maintains the luma histogram used for
   automatic brightness/contrast correction. */
static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
	QP_STORE_T QPs[], int QPStride, int isColor, struct PPMode *ppMode)
{
	int x,y;
	const int mode= isColor ? ppMode->chromMode : ppMode->lumMode;
	/* we need 64bit here otherwise we'll get a problem
	   after watching a black picture for 5 hours */
	static uint64_t *yHistogram= NULL;
	int black=0, white=255; // blackest black and whitest white in the picture
	int QPCorrecture= 256;	// QP scale factor in 1/256 units (256 == 1.0)

	/* Temporary buffers for handling the last row(s) */
	static uint8_t *tempDst= NULL;
	static uint8_t *tempSrc= NULL;

	/* Temporary buffers for handling the last block */
	static uint8_t *tempDstBlock= NULL;
	static uint8_t *tempSrcBlock= NULL;

	/* Temporal noise reducing buffers (one per plane index 0..2) */
	static uint8_t *tempBlured[3]= {NULL,NULL,NULL};
	static uint32_t *tempBluredPast[3]= {NULL,NULL,NULL};

	int copyAhead;	/* extra lines below the 8-line block the filters touch */

#ifdef PP_FUNNY_STRIDE
	uint8_t *dstBlockPtrBackup;
	uint8_t *srcBlockPtrBackup;
#endif

#ifdef MORE_TIMING
	long long T0, T1, diffTime=0;
#endif
#ifdef TIMING
	long long memcpyTime=0, vertTime=0, horizTime=0, sumTime;
	sumTime= rdtsc();
#endif
//mode= 0x7F;
#ifdef HAVE_MMX
	/* mirror the thresholds into the asm-visible globals used by the MMX
	   temporal noise reducer */
	maxTmpNoise[0]= ppMode->maxTmpNoise[0];
	maxTmpNoise[1]= ppMode->maxTmpNoise[1];
	maxTmpNoise[2]= ppMode->maxTmpNoise[2];
#endif

	/* how far below the current block the most demanding enabled filter
	   reads/writes; determines how much must be pre-copied each row */
	if(mode & CUBIC_IPOL_DEINT_FILTER) copyAhead=16;
	else if(mode & LINEAR_BLEND_DEINT_FILTER) copyAhead=14;
	else if(   (mode & V_DEBLOCK)
		|| (mode & LINEAR_IPOL_DEINT_FILTER)
		|| (mode & MEDIAN_DEINT_FILTER)) copyAhead=13;
	else if(mode & V_X1_FILTER) copyAhead=11;
	else if(mode & V_RK1_FILTER) copyAhead=10;
	else if(mode & DERING) copyAhead=9;
	else copyAhead=8;

	copyAhead-= 8;	/* convert to "lines beyond the 8-line block itself" */

	/* lazily allocate the edge scratch buffers, once, kept for the process
	   lifetime (NOTE(review): never freed; allocation results unchecked) */
	if(tempDst==NULL)
	{
		tempDst= (uint8_t*)memalign(8, 1024*24);
		tempSrc= (uint8_t*)memalign(8, 1024*24);
		tempDstBlock= (uint8_t*)memalign(8, 1024*24);
		tempSrcBlock= (uint8_t*)memalign(8, 1024*24);
	}

	if(tempBlured[isColor]==NULL && (mode & TEMP_NOISE_FILTER))
	{
//		printf("%d %d %d\n", isColor, dstStride, height);
		//FIXME works only as long as the size doesnt increase
		//Note: the +17*1024 is just there so i dont have to worry about r/w over the end
		tempBlured[isColor]= (uint8_t*)memalign(8, dstStride*((height+7)&(~7)) + 17*1024);
		tempBluredPast[isColor]= (uint32_t*)memalign(8, 256*((height+7)&(~7))/2 + 17*1024);

		memset(tempBlured[isColor], 0, dstStride*((height+7)&(~7)) + 17*1024);
		memset(tempBluredPast[isColor], 0, 256*((height+7)&(~7))/2 + 17*1024);
	}

	/* first call: seed the histogram with a flat-ish distribution so the
	   level fix does not overreact before real statistics accumulate */
	if(!yHistogram)
	{
		int i;
		yHistogram= (uint64_t*)malloc(8*256);
		for(i=0; i<256; i++) yHistogram[i]= width*height/64*15/256;

		if(mode & FULL_Y_RANGE)
		{
			maxAllowedY=255;
			minAllowedY=0;
		}
	}

	if(!isColor)
	{
		/* derive black/white points from the accumulated luma histogram and
		   build the packed offset/scale words used by blockCopy's level fix */
		uint64_t sum= 0;
		int i;
		static int framenum= -1;
		uint64_t maxClipped;
		uint64_t clipped;
		double scale;

		framenum++;
		if(framenum == 1) yHistogram[0]= width*height/64*15/256;

		for(i=0; i<256; i++)
		{
			sum+= yHistogram[i];
//			printf("%d ", yHistogram[i]);
		}
//		printf("\n\n");

		/* we always get a completely black picture first */
		maxClipped= (uint64_t)(sum * maxClippedThreshold);

		/* black point: darkest level with at most maxClipped pixels above it */
		clipped= sum;
		for(black=255; black>0; black--)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[black];
		}

		/* white point: brightest level with at most maxClipped pixels below */
		clipped= sum;
		for(white=0; white<256; white++)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[white];
		}

		/* replicate the 16-bit offset into all 4 words of the 64-bit global */
		packedYOffset= (black - minAllowedY) & 0xFFFF;
		packedYOffset|= packedYOffset<<32;
		packedYOffset|= packedYOffset<<16;

		scale= (double)(maxAllowedY - minAllowedY) / (double)(white-black);

		/* scale in 1/1024 fixed point, replicated into all 4 words */
		packedYScale= (uint16_t)(scale*1024.0 + 0.5);
		packedYScale|= packedYScale<<32;
		packedYScale|= packedYScale<<16;
	}
	else
	{
		/* chroma: identity transform (scale 1.0, no offset) */
		packedYScale= 0x0100010001000100LL;
		packedYOffset= 0;
	}

	/* when the level fix stretches the range, the QPs must scale with it */
	if(mode & LEVEL_FIX)	QPCorrecture= packedYScale &0xFFFF;
	else			QPCorrecture= 256;

	/* copy & deinterlace first row of blocks */
	y=-BLOCK_SIZE;
	{
		//1% speedup if these are here instead of the inner loop
		uint8_t *srcBlock= &(src[y*srcStride]);
		uint8_t *dstBlock= &(dst[y*dstStride]);

		/* the first row has no line above it -> work in the temp buffer */
		dstBlock= tempDst + dstStride;

		// From this point on it is guaranteed that we can read and write 16 lines downward
		// finish 1 block before the next otherwise we might have a problem
		// with the L1 Cache of the P4 ... or only a few blocks at a time or something
		for(x=0; x<width; x+=BLOCK_SIZE)
		{

#ifdef HAVE_MMX2
/*
			prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
*/
			/* prefetch two lines ahead of the copy, at a row offset that
			   rotates with x ( ((x>>2)&6) + copyAhead ) */
			asm(
				"movl %4, %%eax			\n\t"
				"shrl $2, %%eax			\n\t"
				"andl $6, %%eax			\n\t"
				"addl %5, %%eax			\n\t"
				"movl %%eax, %%ebx		\n\t"
				"imul %1, %%eax			\n\t"
				"imul %3, %%ebx			\n\t"
				"prefetchnta 32(%%eax, %0)	\n\t"
				"prefetcht0 32(%%ebx, %2)	\n\t"
				"addl %1, %%eax			\n\t"
				"addl %3, %%ebx			\n\t"
				"prefetchnta 32(%%eax, %0)	\n\t"
				"prefetcht0 32(%%ebx, %2)	\n\t"
			:: "r" (srcBlock), "r" (srcStride), "r" (dstBlock), "r" (dstStride),
			"m" (x), "m" (copyAhead)
			: "%eax", "%ebx"
			);

#elif defined(HAVE_3DNOW)
//FIXME check if this is faster on an 3dnow chip or if its faster without the prefetch or ...
/*			prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
#endif

			blockCopy(dstBlock + dstStride*copyAhead, dstStride,
				srcBlock + srcStride*copyAhead, srcStride, mode & LEVEL_FIX);

			if(mode & LINEAR_IPOL_DEINT_FILTER)
				deInterlaceInterpolateLinear(dstBlock, dstStride);
			else if(mode & LINEAR_BLEND_DEINT_FILTER)
				deInterlaceBlendLinear(dstBlock, dstStride);
			else if(mode & MEDIAN_DEINT_FILTER)
				deInterlaceMedian(dstBlock, dstStride);
			else if(mode & CUBIC_IPOL_DEINT_FILTER)
				deInterlaceInterpolateCubic(dstBlock, dstStride);
/*			else if(mode & CUBIC_BLEND_DEINT_FILTER)
				deInterlaceBlendCubic(dstBlock, dstStride);
*/
			dstBlock+=8;
			srcBlock+=8;
		}
		/* flush the temp row's finished lines back to the real dst */
		memcpy(&(dst[y*dstStride]) + 8*dstStride, tempDst + 9*dstStride, copyAhead*dstStride );
	}

	for(y=0; y<height; y+=BLOCK_SIZE)
	{
		//1% speedup if these are here instead of the inner loop
		uint8_t *srcBlock= &(src[y*srcStride]);
		uint8_t *dstBlock= &(dst[y*dstStride]);
#ifdef ARCH_X86
		/* fixed-point QP walker: QPptr advances whenever the 32-bit
		   fraction QPFrac wraps (QPDelta per block).
		   NOTE(review): assumes QP_STORE_T is int-sized — confirm; also
		   verify the advance rate matches the non-x86 (x>>3 / x>>4)
		   indexing below */
		int *QPptr= isColor ? &QPs[(y>>3)*QPStride] :&QPs[(y>>4)*QPStride];
		int QPDelta= isColor ? 1<<(32-3) : 1<<(32-4);
		int QPFrac= QPDelta;
		uint8_t *tempBlock1= tempBlocks;
		uint8_t *tempBlock2= tempBlocks + 8;
#endif
		int QP=0;
		/* can we mess with a 8x16 block from srcBlock/dstBlock downwards and 1 line upwards
		   if not then use a temporary buffer */
		if(y+15 >= height)
		{
			int i;
			/* copy from line (copyAhead) to (copyAhead+7) of src, these will be copied with
			   blockcopy to dst later */
			memcpy(tempSrc + srcStride*copyAhead, srcBlock + srcStride*copyAhead,
				srcStride*MAX(height-y-copyAhead, 0) );

			/* duplicate last line of src to fill the void up to line (copyAhead+7) */
			for(i=MAX(height-y, 8); i<copyAhead+8; i++)
				memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), srcStride);

			/* copy up to (copyAhead+1) lines of dst (line -1 to (copyAhead-1))*/
			memcpy(tempDst, dstBlock - dstStride, dstStride*MIN(height-y+1, copyAhead+1) );

			/* duplicate last line of dst to fill the void up to line (copyAhead) */
			for(i=height-y+1; i<=copyAhead; i++)
				memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), dstStride);

			dstBlock= tempDst + dstStride;
			srcBlock= tempSrc;
		}

		// From this point on it is guaranteed that we can read and write 16 lines downward
		// finish 1 block before the next otherwise we might have a problem
		// with the L1 Cache of the P4 ... or only a few blocks at a time or something
		for(x=0; x<width; x+=BLOCK_SIZE)
		{
			const int stride= dstStride;
			uint8_t *tmpXchg;
#ifdef ARCH_X86
			QP= *QPptr;
			/* add QPDelta to QPFrac; on carry (wrap) advance QPptr by
			   one int (sbb produces -1, shl makes -4, sub adds 4) */
			asm volatile(
				"addl %2, %1				\n\t"
				"sbbl %%eax, %%eax			\n\t"
				"shll $2, %%eax				\n\t"
				"subl %%eax, %0				\n\t"
				: "+r" (QPptr), "+m" (QPFrac)
				: "r" (QPDelta)
				: "%eax"
			);
#else
			QP= isColor ?
				QPs[(y>>3)*QPStride + (x>>3)]:
				QPs[(y>>4)*QPStride + (x>>4)];
#endif
			if(!isColor)
			{
				/* rescale QP to the level-fixed range and sample one pixel
				   per block into the luma histogram for the next frame */
				QP= (QP* QPCorrecture)>>8;
				yHistogram[ srcBlock[srcStride*12 + 4] ]++;
			}
#ifdef HAVE_MMX
			/* broadcast QP into all 8 bytes of the asm-visible global pQPb */
			asm volatile(
				"movd %0, %%mm7					\n\t"
				"packuswb %%mm7, %%mm7				\n\t" // 0, 0, 0, QP, 0, 0, 0, QP
				"packuswb %%mm7, %%mm7				\n\t" // 0,QP, 0, QP, 0,QP, 0, QP
				"packuswb %%mm7, %%mm7				\n\t" // QP,..., QP
				"movq %%mm7, pQPb				\n\t"
				: : "r" (QP)
			);
#endif
#ifdef MORE_TIMING
			T0= rdtsc();
#endif

#ifdef HAVE_MMX2
/*
			prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
*/
			/* same rotating prefetch as in the first-row loop above */
			asm(
				"movl %4, %%eax			\n\t"
				"shrl $2, %%eax			\n\t"
				"andl $6, %%eax			\n\t"
				"addl %5, %%eax			\n\t"
				"movl %%eax, %%ebx		\n\t"
				"imul %1, %%eax			\n\t"
				"imul %3, %%ebx			\n\t"
				"prefetchnta 32(%%eax, %0)	\n\t"
				"prefetcht0 32(%%ebx, %2)	\n\t"
				"addl %1, %%eax			\n\t"
				"addl %3, %%ebx			\n\t"
				"prefetchnta 32(%%eax, %0)	\n\t"
				"prefetcht0 32(%%ebx, %2)	\n\t"
			:: "r" (srcBlock), "r" (srcStride), "r" (dstBlock), "r" (dstStride),
			"m" (x), "m" (copyAhead)
			: "%eax", "%ebx"
			);

#elif defined(HAVE_3DNOW)
//FIXME check if this is faster on an 3dnow chip or if its faster without the prefetch or ...
/*			prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
#endif

#ifdef PP_FUNNY_STRIDE
			//can we mess with a 8x16 block, if not use a temp buffer, yes again
			if(x+7 >= width)
			{
				int i;
				dstBlockPtrBackup= dstBlock;
				srcBlockPtrBackup= srcBlock;

				for(i=0;i<BLOCK_SIZE*2; i++)
				{
					memcpy(tempSrcBlock+i*srcStride, srcBlock+i*srcStride, width-x);
					memcpy(tempDstBlock+i*dstStride, dstBlock+i*dstStride, width-x);
				}

				dstBlock= tempDstBlock;
				srcBlock= tempSrcBlock;
			}
#endif

			blockCopy(dstBlock + dstStride*copyAhead, dstStride,
				srcBlock + srcStride*copyAhead, srcStride, mode & LEVEL_FIX);

			if(mode & LINEAR_IPOL_DEINT_FILTER)
				deInterlaceInterpolateLinear(dstBlock, dstStride);
			else if(mode & LINEAR_BLEND_DEINT_FILTER)
				deInterlaceBlendLinear(dstBlock, dstStride);
			else if(mode & MEDIAN_DEINT_FILTER)
				deInterlaceMedian(dstBlock, dstStride);
			else if(mode & CUBIC_IPOL_DEINT_FILTER)
				deInterlaceInterpolateCubic(dstBlock, dstStride);
/*			else if(mode & CUBIC_BLEND_DEINT_FILTER)
				deInterlaceBlendCubic(dstBlock, dstStride);
*/

			/* only deblock if we have 2 blocks */
			if(y + 8 < height)
			{
#ifdef MORE_TIMING
				T1= rdtsc();
				memcpyTime+= T1-T0;
				T0=T1;
#endif
				/* vertical filtering across the horizontal block edge */
				if(mode & V_RK1_FILTER)
					vertRK1Filter(dstBlock, stride, QP);
				else if(mode & V_X1_FILTER)
					vertX1Filter(dstBlock, stride, QP);
				else if(mode & V_DEBLOCK)
				{
					/* flat (DC) blocks get the strong lowpass, others
					   the default edge filter */
					if( isVertDC(dstBlock, stride))
					{
						if(isVertMinMaxOk(dstBlock, stride, QP))
							doVertLowPass(dstBlock, stride, QP);
					}
					else
						doVertDefFilter(dstBlock, stride, QP);
				}
#ifdef MORE_TIMING
				T1= rdtsc();
				vertTime+= T1-T0;
				T0=T1;
#endif
			}

#ifdef HAVE_MMX
			/* transpose the block so the horizontal filters can reuse the
			   vertical MMX kernels on a 16-byte-stride copy */
			transpose1(tempBlock1, tempBlock2, dstBlock, dstStride);
#endif
			/* check if we have a previous block to deblock it with dstBlock */
			if(x - 8 >= 0)
			{
#ifdef MORE_TIMING
				T0= rdtsc();
#endif
#ifdef HAVE_MMX
				/* horizontal filtering == vertical filtering of the
				   transposed block */
				if(mode & H_RK1_FILTER)
					vertRK1Filter(tempBlock1, 16, QP);
				else if(mode & H_X1_FILTER)
					vertX1Filter(tempBlock1, 16, QP);
				else if(mode & H_DEBLOCK)
				{
					if( isVertDC(tempBlock1, 16) )
					{
						if(isVertMinMaxOk(tempBlock1, 16, QP))
							doVertLowPass(tempBlock1, 16, QP);
					}
					else
						doVertDefFilter(tempBlock1, 16, QP);
				}

				/* transpose the filtered columns back into dst */
				transpose2(dstBlock-4, dstStride, tempBlock1 + 4*16);

#else
				if(mode & H_X1_FILTER)
					horizX1Filter(dstBlock-4, stride, QP);
				else if(mode & H_DEBLOCK)
				{
					if( isHorizDC(dstBlock-4, stride))
					{
						if(isHorizMinMaxOk(dstBlock-4, stride, QP))
							doHorizLowPass(dstBlock-4, stride, QP);
					}
					else
						doHorizDefFilter(dstBlock-4, stride, QP);
				}
#endif
#ifdef MORE_TIMING
				T1= rdtsc();
				horizTime+= T1-T0;
				T0=T1;
#endif
				/* dering/denoise run on the fully deblocked previous block */
				if(mode & DERING)
				{
				//FIXME filter first line
					if(y>0) dering(dstBlock - stride - 8, stride, QP);
				}

				if(mode & TEMP_NOISE_FILTER)
				{
					tempNoiseReducer(dstBlock-8, stride,
						tempBlured[isColor] + y*dstStride + x,
						tempBluredPast[isColor] + (y>>3)*256 + (x>>3),
						ppMode->maxTmpNoise);
				}
			}

#ifdef PP_FUNNY_STRIDE
			/* did we use a tmp-block buffer */
			if(x+7 >= width)
			{
				int i;
				dstBlock= dstBlockPtrBackup;
				srcBlock= srcBlockPtrBackup;

				for(i=0;i<BLOCK_SIZE*2; i++)
				{
					memcpy(dstBlock+i*dstStride, tempDstBlock+i*dstStride, width-x);
				}
			}
#endif

			dstBlock+=8;
			srcBlock+=8;

#ifdef HAVE_MMX
			/* double-buffer the transposed blocks: current becomes
			   "previous" for the next iteration */
			tmpXchg= tempBlock1;
			tempBlock1= tempBlock2;
			tempBlock2 = tmpXchg;
#endif
		}

		/* the rightmost block never became "previous" inside the loop, so
		   dering/denoise it here */
		if(mode & DERING)
		{
				if(y > 0) dering(dstBlock - dstStride - 8, dstStride, QP);
		}

		if((mode & TEMP_NOISE_FILTER))
		{
			tempNoiseReducer(dstBlock-8, dstStride,
				tempBlured[isColor] + y*dstStride + x,
				tempBluredPast[isColor] + (y>>3)*256 + (x>>3),
				ppMode->maxTmpNoise);
		}

		/* did we use a tmp buffer for the last lines*/
		if(y+15 >= height)
		{
			uint8_t *dstBlock= &(dst[y*dstStride]);
			memcpy(dstBlock, tempDst + dstStride, dstStride*(height-y) );
		}
/*
		for(x=0; x<width; x+=32)
		{
			volatile int i;
			i+=	+ dstBlock[x + 7*dstStride] + dstBlock[x + 8*dstStride]
				+ dstBlock[x + 9*dstStride] + dstBlock[x +10*dstStride]
				+ dstBlock[x +11*dstStride] + dstBlock[x +12*dstStride];
//				+ dstBlock[x +13*dstStride]
//				+ dstBlock[x +14*dstStride] + dstBlock[x +15*dstStride];
		}*/
	}
#ifdef HAVE_3DNOW
	/* leave MMX state so following FPU code works */
	asm volatile("femms");
#elif defined (HAVE_MMX)
	asm volatile("emms");
#endif

#ifdef TIMING
	// FIXME diff is mostly the time spent for rdtsc (should subtract that but ...)
	sumTime= rdtsc() - sumTime;
	if(!isColor)
		printf("cpy:%4dk, vert:%4dk, horiz:%4dk, sum:%4dk, diff:%4dk, color: %d/%d	\r",
			(int)(memcpyTime/1000), (int)(vertTime/1000), (int)(horizTime/1000),
			(int)(sumTime/1000), (int)((sumTime-memcpyTime-vertTime-horizTime)/1000)
			, black, white);
#endif
#ifdef DEBUG_BRIGHTNESS
	if(!isColor)
	{
		int max=1;
		int i;
		for(i=0; i<256; i++)
			if(yHistogram[i] > max) max=yHistogram[i];

		for(i=1; i<256; i++)
		{
			int x;
			int start=yHistogram[i-1]/(max/256+1);
			int end=yHistogram[i]/(max/256+1);
			int inc= end > start ? 1 : -1;
			for(x=start; x!=end+inc; x+=inc)
				dst[ i*dstStride + x]+=128;
		}

		for(i=0; i<100; i+=2)
		{
			dst[ (white)*dstStride + i]+=128;
			dst[ (black)*dstStride + i]+=128;
		}

	}
#endif

}