You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3372 lines
96KB

  1. /*
  2. Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. /*
  16. C MMX MMX2 3DNow
  17. isVertDC Ec Ec
  18. isVertMinMaxOk Ec Ec
  19. doVertLowPass E e e
  20. doVertDefFilter Ec Ec Ec
  21. isHorizDC Ec Ec
  22. isHorizMinMaxOk a E
  23. doHorizLowPass E e e
  24. doHorizDefFilter Ec Ec Ec
  25. deRing E e e*
  26. Vertical RKAlgo1 E a a
  27. Horizontal RKAlgo1 a a
  28. Vertical X1 a E E
  29. Horizontal X1 a E E
  30. LinIpolDeinterlace e E E*
  31. CubicIpolDeinterlace a e e*
  32. LinBlendDeinterlace e E E*
  33. MedianDeinterlace Ec Ec
  34. * I don't have a 3DNow CPU -> it's untested
  35. E = Exact implementation
  36. e = almost exact implementation (slightly different rounding,...)
  37. a = alternative / approximate impl
  38. c = checked against the other implementations (-vo md5)
  39. */
  40. /*
  41. TODO:
  42. verify that everything works as it should (how?)
  43. reduce the time wasted on the mem transfer
  44. implement dering
  45. implement everything in C at least (done at the moment but ...)
  46. unroll stuff if instructions depend too much on the prior one
  47. we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
  48. move YScale thing to the end instead of fixing QP
  49. write a faster and higher quality deblocking filter :)
  50. do something about the speed of the horizontal filters
  51. make the mainloop more flexible (variable number of blocks at once
  52. (the if/else stuff per block is slowing things down)
  53. compare the quality & speed of all filters
  54. split this huge file
  55. fix warnings (unused vars, ...)
  56. noise reduction filters
  57. border remover
  58. ...
  59. Notes:
  60. */
  61. //Changelog: use the CVS log
  62. #include <inttypes.h>
  63. #include <stdio.h>
  64. #include <stdlib.h>
  65. #include <string.h>
  66. #include "../config.h"
  67. #ifdef HAVE_MALLOC_H
  68. #include <malloc.h>
  69. #endif
  70. //#undef HAVE_MMX2
  71. //#define HAVE_3DNOW
  72. //#undef HAVE_MMX
  73. #include "postprocess.h"
  74. #define MIN(a,b) ((a) > (b) ? (b) : (a))
  75. #define MAX(a,b) ((a) < (b) ? (b) : (a))
  76. #define ABS(a) ((a) > 0 ? (a) : (-(a)))
  77. #define SIGN(a) ((a) > 0 ? 1 : -1)
/* PAVGB(a,b): emit asm computing b = per-byte average of a and b.
   Uses the native packed-average instruction where available
   (pavgb on MMX2, pavgusb on 3DNow). */
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif
/* PMINUB: emit asm leaving the per-byte unsigned minimum in the second
   operand. The plain-MMX fallback needs a scratch register t:
   t = a; t -= b (unsigned saturating, so t = max(a-b,0));
   a -= t  ->  a = min(a,b). */
#ifdef HAVE_MMX2
#define PMINUB(a,b,t) "pminub " #a ", " #b " \n\t"
#elif defined (HAVE_MMX)
#define PMINUB(b,a,t) \
"movq " #a ", " #t " \n\t"\
"psubusb " #b ", " #t " \n\t"\
"psubb " #t ", " #a " \n\t"
#endif
/* PMAXUB(a,b): emit asm computing b = per-byte unsigned maximum.
   Plain-MMX fallback: b = max(b-a,0) (saturating sub), then
   b += a  ->  b = max(a,b). */
#ifdef HAVE_MMX2
#define PMAXUB(a,b) "pmaxub " #a ", " #b " \n\t"
#elif defined (HAVE_MMX)
#define PMAXUB(a,b) \
"psubusb " #a ", " #b " \n\t"\
"paddb " #a ", " #b " \n\t"
#endif
// buffer size for mode-string parsing — presumably used by getMode-style
// option parsing later in the file; TODO(review): confirm against caller
#define GET_MODE_BUFFER_SIZE 500
// max options per filter in a mode string — TODO(review): confirm
#define OPTIONS_ARRAY_SIZE 10
/* The following 64-bit values are referenced BY NAME from the inline asm
   below (e.g. "movq b7E, %%mm7"), so they must stay non-static-local and
   must keep these exact names. */
// per-byte luma offset / per-word luma scale, presumably set by the
// auto-level (brightness) correction code — TODO(review): confirm setter
static uint64_t packedYOffset= 0x0000000000000000LL;
static uint64_t packedYScale= 0x0100010001000100LL;
// packed 16-bit word constants (4 copies of the value)
static uint64_t w05= 0x0005000500050005LL;
static uint64_t w20= 0x0020002000200020LL;
static uint64_t w1400= 0x1400140014001400LL;
// byte-lane masks: bmXXXXXXXX selects the bytes whose digit is 1
// (name reads most-significant byte first)
static uint64_t bm00000001= 0x00000000000000FFLL;
static uint64_t bm00010000= 0x000000FF00000000LL;
static uint64_t bm00001000= 0x00000000FF000000LL;
static uint64_t bm10000000= 0xFF00000000000000LL;
static uint64_t bm10000001= 0xFF000000000000FFLL;
static uint64_t bm11000011= 0xFFFF00000000FFFFLL;
static uint64_t bm00000011= 0x000000000000FFFFLL;
static uint64_t bm11111110= 0xFFFFFFFFFFFFFF00LL;
static uint64_t bm11000000= 0xFFFF000000000000LL;
static uint64_t bm00011000= 0x000000FFFF000000LL;
static uint64_t bm00110011= 0x0000FFFF0000FFFFLL;
static uint64_t bm11001100= 0xFFFF0000FFFF0000LL;
// bXX: the byte value XX replicated into all 8 lanes
static uint64_t b00= 0x0000000000000000LL;
static uint64_t b01= 0x0101010101010101LL;
static uint64_t b02= 0x0202020202020202LL;
static uint64_t b0F= 0x0F0F0F0F0F0F0F0FLL;
static uint64_t b04= 0x0404040404040404LL;
static uint64_t b08= 0x0808080808080808LL;
static uint64_t bFF= 0xFFFFFFFFFFFFFFFFLL;
static uint64_t b20= 0x2020202020202020LL;
static uint64_t b80= 0x8080808080808080LL;
static uint64_t b7E= 0x7E7E7E7E7E7E7E7ELL;
static uint64_t b7C= 0x7C7C7C7C7C7C7C7CLL;
static uint64_t b3F= 0x3F3F3F3F3F3F3F3FLL;
// scratch slots for the asm (spill space; MMX has only 8 registers)
static uint64_t temp0=0;
static uint64_t temp1=0;
static uint64_t temp2=0;
static uint64_t temp3=0;
static uint64_t temp4=0;
static uint64_t temp5=0;
// current quantizer replicated into 8 bytes (pQPb2 presumably 2*QP) —
// set elsewhere before the filters run; TODO(review): confirm setter
static uint64_t pQPb=0;
static uint64_t pQPb2=0;
static uint8_t tempBlocks[8*16*2]; //used for the horizontal code
// a block is "flat" when more than this many of the 8*8=64 per-row
// neighbor pairs are (nearly) equal
int hFlatnessThreshold= 56 - 16;
int vFlatnessThreshold= 56 - 16;
//amount of "black" u r willing to loose to get a brightness corrected picture
double maxClippedThreshold= 0.01;
// legal luma range for the level fix (ITU-R BT.601 nominal range)
int maxAllowedY=234;
int minAllowedY=16;
/* Table of all available postprocessing filters, terminated by a NULL
   entry. Field layout follows struct PPFilter (declared in
   postprocess.h, not visible here) — looks like
   {shortName, longName, <3 ints>, modeFlag}; the ints presumably encode
   chroma default and quality range — TODO(review): confirm against the
   struct declaration. */
static struct PPFilter filters[]=
{
{"hb", "hdeblock", 1, 1, 3, H_DEBLOCK},
{"vb", "vdeblock", 1, 2, 4, V_DEBLOCK},
{"vr", "rkvdeblock", 1, 2, 4, H_RK1_FILTER},
{"h1", "x1hdeblock", 1, 1, 3, H_X1_FILTER},
{"v1", "x1vdeblock", 1, 2, 4, V_X1_FILTER},
{"dr", "dering", 1, 5, 6, DERING},
{"al", "autolevels", 0, 1, 2, LEVEL_FIX},
{"lb", "linblenddeint", 0, 1, 6, LINEAR_BLEND_DEINT_FILTER},
{"li", "linipoldeint", 0, 1, 6, LINEAR_IPOL_DEINT_FILTER},
{"ci", "cubicipoldeint", 0, 1, 6, CUBIC_IPOL_DEINT_FILTER},
{"md", "mediandeint", 0, 1, 6, MEDIAN_DEINT_FILTER},
{NULL, NULL,0,0,0,0} //End Marker
};
/* Alias expansion table for mode strings: flat list of
   {alias, replacement} pairs, NULL-terminated. An alias like "fast"
   expands to a full filter chain before parsing. */
static char *replaceTable[]=
{
"default", "hdeblock:a,vdeblock:a,dering:a,autolevels",
"de", "hdeblock:a,vdeblock:a,dering:a,autolevels",
"fast", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels",
"fa", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels",
NULL //End Marker
};
  167. static inline void unusedVariableWarningFixer()
  168. {
  169. if(
  170. packedYOffset + packedYScale + w05 + w20 + w1400 + bm00000001 + bm00010000
  171. + bm00001000 + bm10000000 + bm10000001 + bm11000011 + bm00000011 + bm11111110
  172. + bm11000000 + bm00011000 + bm00110011 + bm11001100 + b00 + b01 + b02 + b0F
  173. + bFF + b20 + b80 + b7E + b7C + b3F + temp0 + temp1 + temp2 + temp3 + temp4
  174. + temp5 + pQPb== 0) b00=0;
  175. }
  176. #ifdef TIMING
/* Read the x86 time-stamp counter for the TIMING instrumentation.
   The "=A" constraint returns edx:eax as one 64-bit value (32-bit x86
   only). */
static inline long long rdtsc()
{
long long l;
asm volatile( "rdtsc\n\t"
: "=A" (l)
);
// printf("%d\n", int(l/1000));
return l;
}
  186. #endif
  187. #ifdef HAVE_MMX2
/* Prefetch *p into cache, non-temporal (bypass/minimize cache pollution). */
static inline void prefetchnta(void *p)
{
asm volatile( "prefetchnta (%0)\n\t"
: : "r" (p)
);
}
/* Prefetch *p into all cache levels (t0 hint). */
static inline void prefetcht0(void *p)
{
asm volatile( "prefetcht0 (%0)\n\t"
: : "r" (p)
);
}
/* Prefetch *p into L2 and higher (t1 hint). */
static inline void prefetcht1(void *p)
{
asm volatile( "prefetcht1 (%0)\n\t"
: : "r" (p)
);
}
/* Prefetch *p into L2 and higher (t2 hint). */
static inline void prefetcht2(void *p)
{
asm volatile( "prefetcht2 (%0)\n\t"
: : "r" (p)
);
}
  212. #endif
  213. //FIXME? |255-0| = 1 (shouldnt be a problem ...)
  214. /**
  215. * Check if the middle 8x8 Block in the given 8x16 block is flat
  216. */
/**
 * Check if the middle 8x8 block in the given 8x16 block is flat.
 * Counts, over the 7 vertically adjacent row pairs of the 8x8 core, how
 * many pixel pairs differ by at most 1; the block is "flat" when that
 * count exceeds vFlatnessThreshold.
 * @param src    top of the surrounding 8x16 block (stride*4 is added to
 *               reach the 8x8 core)
 * @param stride distance in bytes between vertically adjacent pixels
 * @return 1 if flat, 0 otherwise
 */
static inline int isVertDC(uint8_t src[], int stride){
int numEq= 0;
#ifndef HAVE_MMX
int y;
#endif
src+= stride*4; // src points to begin of the 8x8 Block
#ifdef HAVE_MMX
/* Per byte: d = a - b; (d + 0x7E) >signed 0x7C holds exactly for
   d in {-1,0,1}, producing 0xFF (= -1) per "equal" byte. The 7 row
   comparisons are accumulated with paddb, then summed horizontally,
   so the low byte of mm0 ends up as -count mod 256. */
asm volatile(
"leal (%1, %2), %%eax \n\t"
"leal (%%eax, %2, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %1 eax eax+%2 eax+2%2 %1+4%2 ebx ebx+%2 ebx+2%2 %1+8%2 ebx+4%2
"movq b7E, %%mm7 \n\t" // mm7 = 0x7E in every byte
"movq b7C, %%mm6 \n\t" // mm6 = 0x7C in every byte
"movq (%1), %%mm0 \n\t"
"movq (%%eax), %%mm1 \n\t"
"psubb %%mm1, %%mm0 \n\t" // mm0 = difference
"paddb %%mm7, %%mm0 \n\t"
"pcmpgtb %%mm6, %%mm0 \n\t"
"movq (%%eax,%2), %%mm2 \n\t"
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"movq (%%eax, %2, 2), %%mm1 \n\t"
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
"movq (%1, %2, 4), %%mm2 \n\t"
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"movq (%%ebx), %%mm1 \n\t"
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
"movq (%%ebx, %2), %%mm2 \n\t"
"psubb %%mm2, %%mm1 \n\t"
"paddb %%mm7, %%mm1 \n\t"
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"movq (%%ebx, %2, 2), %%mm1 \n\t"
"psubb %%mm1, %%mm2 \n\t"
"paddb %%mm7, %%mm2 \n\t"
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
" \n\t"
// horizontal sum of the 8 byte counters into the low byte of mm0
"movq %%mm0, %%mm1 \n\t"
"psrlw $8, %%mm0 \n\t"
"paddb %%mm1, %%mm0 \n\t"
#ifdef HAVE_MMX2
"pshufw $0xF9, %%mm0, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"pshufw $0xFE, %%mm0, %%mm1 \n\t"
#else
"movq %%mm0, %%mm1 \n\t"
"psrlq $16, %%mm0 \n\t"
"paddb %%mm1, %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"psrlq $32, %%mm0 \n\t"
#endif
"paddb %%mm1, %%mm0 \n\t"
"movd %%mm0, %0 \n\t"
: "=r" (numEq)
: "r" (src), "r" (stride)
: "%eax", "%ebx"
);
// each match contributed -1, so recover the positive count mod 256
numEq= (256 - numEq) &0xFF;
#else
/* C fallback: (diff + 1) & 0xFFFF < 3 accepts diff in {-1,0,1}
   (negative values wrap to >= 0xFFFF-ish large numbers). */
for(y=0; y<BLOCK_SIZE-1; y++)
{
if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
src+= stride;
}
#endif
/* if(abs(numEq - asmEq) > 0)
{
printf("\nasm:%d c:%d\n", asmEq, numEq);
for(int y=0; y<8; y++)
{
for(int x=0; x<8; x++)
{
printf("%d ", temp[x + y*stride]);
}
printf("\n");
}
}
*/
// for(int i=0; i<numEq/8; i++) src[i]=255;
return (numEq > vFlatnessThreshold) ? 1 : 0;
}
/**
 * Check that the vertical dynamic range of the block is small enough for
 * deblocking: the C version requires |src[x+stride] - src[x+8*stride]|
 * <= 2*QP for every column x. The MMX version approximates this with
 * dword-granularity compares — NOTE(review): the asm path reduces the
 * 8 per-byte results via pcmpeqd/psrlq, which is not an exact
 * per-column check; confirm equivalence before relying on it.
 * @return nonzero if the block may be filtered
 */
static inline int isVertMinMaxOk(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
int isOk;
src+= stride*3;
asm volatile(
// "int $3 \n\t"
"movq (%1, %2), %%mm0 \n\t" // first row of the window
"movq (%1, %2, 8), %%mm1 \n\t" // last row of the window
"movq %%mm0, %%mm2 \n\t"
"psubusb %%mm1, %%mm0 \n\t"
"psubusb %%mm2, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t" // ABS Diff (per byte)
"movq pQPb, %%mm7 \n\t" // QP,..., QP
"paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
"psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
"pcmpeqd b00, %%mm0 \n\t"
"psrlq $16, %%mm0 \n\t"
"pcmpeqd bFF, %%mm0 \n\t"
// "movd %%mm0, (%1, %2, 4)\n\t"
"movd %%mm0, %0 \n\t"
: "=r" (isOk)
: "r" (src), "r" (stride)
);
return isOk;
#else
int isOk2= 1;
int x;
src+= stride*3;
for(x=0; x<BLOCK_SIZE; x++)
{
if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=0;
}
/* if(isOk && !isOk2 || !isOk && isOk2)
{
printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
for(int y=0; y<9; y++)
{
for(int x=0; x<8; x++)
{
printf("%d ", src[x + y*stride]);
}
printf("\n");
}
} */
return isOk2;
#endif
}
  366. /**
  367. * Do a vertical low pass filter on the 8x16 block (only write to the 8x8 block in the middle)
  368. * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16
  369. */
/**
 * Do a vertical low pass filter on the 8x16 block (only write to the 8x8
 * block in the middle) using the 9-tap filter (1,1,2,2,4,2,2,1,1)/16.
 * The outermost taps are clamped: the pixel above (resp. below) the core
 * is only used if it differs from the core's edge row by less than QP,
 * otherwise the edge row itself is duplicated — this keeps real edges
 * intact while smoothing blocking artifacts.
 * The asm builds each weighted sum out of chained PAVGB averages (the
 * "/2 /4 ... /16" comments track the accumulated weights).
 * @param src    top of the 8x16 block; stride*3 is added internally
 * @param stride line size in bytes
 * @param QP     quantizer, controls the edge-detection threshold
 */
static inline void doVertLowPass(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*3;
asm volatile( //"movv %0 %1 %2\n\t"
"pushl %0 \n\t"
"movq pQPb, %%mm0 \n\t" // QP,..., QP
"movq (%0), %%mm6 \n\t"
"movq (%0, %1), %%mm5 \n\t"
"movq %%mm5, %%mm1 \n\t"
"movq %%mm6, %%mm2 \n\t"
"psubusb %%mm6, %%mm5 \n\t"
"psubusb %%mm1, %%mm2 \n\t"
"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
"pand %%mm2, %%mm6 \n\t"
"pandn %%mm1, %%mm2 \n\t"
"por %%mm2, %%mm6 \n\t"// first line to filter (line 0 or duplicated line 1)
"movq (%0, %1, 8), %%mm5 \n\t"
"leal (%0, %1, 4), %%eax \n\t"
"leal (%0, %1, 8), %%ebx \n\t"
"subl %1, %%ebx \n\t"
"addl %1, %0 \n\t" // %0 points to line 1 not 0
"movq (%0, %1, 8), %%mm7 \n\t"
"movq %%mm5, %%mm1 \n\t"
"movq %%mm7, %%mm2 \n\t"
"psubusb %%mm7, %%mm5 \n\t"
"psubusb %%mm1, %%mm2 \n\t"
"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
"pand %%mm2, %%mm7 \n\t"
"pandn %%mm1, %%mm2 \n\t"
"por %%mm2, %%mm7 \n\t" // last line to filter (line 9 or duplicated line 8)
// 1 2 3 4 5 6 7 8
// %0 %0+%1 %0+2%1 eax %0+4%1 eax+2%1 ebx eax+4%1
// 6 4 2 2 1 1
// 6 4 4 2
// 6 8 2
"movq (%0, %1), %%mm0 \n\t" // 1
"movq %%mm0, %%mm1 \n\t" // 1
PAVGB(%%mm6, %%mm0) //1 1 /2
PAVGB(%%mm6, %%mm0) //3 1 /4
"movq (%0, %1, 4), %%mm2 \n\t" // 1
"movq %%mm2, %%mm5 \n\t" // 1
PAVGB((%%eax), %%mm2) // 11 /2
PAVGB((%0, %1, 2), %%mm2) // 211 /4
"movq %%mm2, %%mm3 \n\t" // 211 /4
"movq (%0), %%mm4 \n\t" // 1
PAVGB(%%mm4, %%mm3) // 4 211 /8
PAVGB(%%mm0, %%mm3) //642211 /16
"movq %%mm3, (%0) \n\t" // X
// mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
"movq %%mm1, %%mm0 \n\t" // 1
PAVGB(%%mm6, %%mm0) //1 1 /2
"movq %%mm4, %%mm3 \n\t" // 1
PAVGB((%0,%1,2), %%mm3) // 1 1 /2
PAVGB((%%eax,%1,2), %%mm5) // 11 /2
PAVGB((%%eax), %%mm5) // 211 /4
PAVGB(%%mm5, %%mm3) // 2 2211 /8
PAVGB(%%mm0, %%mm3) //4242211 /16
"movq %%mm3, (%0,%1) \n\t" // X
// mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
PAVGB(%%mm4, %%mm6) //11 /2
"movq (%%ebx), %%mm0 \n\t" // 1
PAVGB((%%eax, %1, 2), %%mm0) // 11/2
"movq %%mm0, %%mm3 \n\t" // 11/2
PAVGB(%%mm1, %%mm0) // 2 11/4
PAVGB(%%mm6, %%mm0) //222 11/8
PAVGB(%%mm2, %%mm0) //22242211/16
"movq (%0, %1, 2), %%mm2 \n\t" // 1
"movq %%mm0, (%0, %1, 2) \n\t" // X
// mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
"movq (%%eax, %1, 4), %%mm0 \n\t" // 1
PAVGB((%%ebx), %%mm0) // 11 /2
PAVGB(%%mm0, %%mm6) //11 11 /4
PAVGB(%%mm1, %%mm4) // 11 /2
PAVGB(%%mm2, %%mm1) // 11 /2
PAVGB(%%mm1, %%mm6) //1122 11 /8
PAVGB(%%mm5, %%mm6) //112242211 /16
"movq (%%eax), %%mm5 \n\t" // 1
"movq %%mm6, (%%eax) \n\t" // X
// mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
"movq (%%eax, %1, 4), %%mm6 \n\t" // 1
PAVGB(%%mm7, %%mm6) // 11 /2
PAVGB(%%mm4, %%mm6) // 11 11 /4
PAVGB(%%mm3, %%mm6) // 11 2211 /8
PAVGB(%%mm5, %%mm2) // 11 /2
"movq (%0, %1, 4), %%mm4 \n\t" // 1
PAVGB(%%mm4, %%mm2) // 112 /4
PAVGB(%%mm2, %%mm6) // 112242211 /16
"movq %%mm6, (%0, %1, 4) \n\t" // X
// mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
PAVGB(%%mm7, %%mm1) // 11 2 /4
PAVGB(%%mm4, %%mm5) // 11 /2
PAVGB(%%mm5, %%mm0) // 11 11 /4
"movq (%%eax, %1, 2), %%mm6 \n\t" // 1
PAVGB(%%mm6, %%mm1) // 11 4 2 /8
PAVGB(%%mm0, %%mm1) // 11224222 /16
"movq %%mm1, (%%eax, %1, 2) \n\t" // X
// mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
PAVGB((%%ebx), %%mm2) // 112 4 /8
"movq (%%eax, %1, 4), %%mm0 \n\t" // 1
PAVGB(%%mm0, %%mm6) // 1 1 /2
PAVGB(%%mm7, %%mm6) // 1 12 /4
PAVGB(%%mm2, %%mm6) // 1122424 /4
"movq %%mm6, (%%ebx) \n\t" // X
// mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
PAVGB(%%mm7, %%mm5) // 11 2 /4
PAVGB(%%mm7, %%mm5) // 11 6 /8
PAVGB(%%mm3, %%mm0) // 112 /4
PAVGB(%%mm0, %%mm5) // 112246 /16
"movq %%mm5, (%%eax, %1, 4) \n\t" // X
"popl %0\n\t"
:
: "r" (src), "r" (stride)
: "%eax", "%ebx"
);
#else
// C reference: lN is the byte offset of line N; sums[i] holds the sum of
// two adjacent lines so each 9-tap output needs only shifts and adds.
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
const int l8= stride + l7;
const int l9= stride + l8;
int x;
src+= stride*3;
for(x=0; x<BLOCK_SIZE; x++)
{
// clamp the taps outside the 8x8 core (see function comment)
const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];
int sums[9];
sums[0] = first + src[l1];
sums[1] = src[l1] + src[l2];
sums[2] = src[l2] + src[l3];
sums[3] = src[l3] + src[l4];
sums[4] = src[l4] + src[l5];
sums[5] = src[l5] + src[l6];
sums[6] = src[l6] + src[l7];
sums[7] = src[l7] + src[l8];
sums[8] = src[l8] + last;
src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
src[l2]= ((src[l2]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
src[l3]= ((src[l3]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
src[l4]= ((src[l4]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
src[l5]= ((src[l5]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
src[l6]= ((src[l6]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
src[l7]= (((last + src[l7])<<2) + ((src[l8] + sums[5])<<1) + sums[3] + 8)>>4;
src[l8]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
src++;
}
#endif
}
  527. /**
  528. * Experimental implementation of the filter (Algorithm 1) described in a paper from Ramkishor & Karandikar
  529. * values are correctly clipped (MMX2)
  530. * values are wraparound (C)
  531. conclusion: it's fast, but introduces ugly horizontal patterns if there is a continuous gradient
  532. 0 8 16 24
  533. x = 8
  534. x/2 = 4
  535. x/8 = 1
  536. 1 12 12 23
  537. */
/**
 * Ramkishor/Karandikar "Algorithm 1" vertical deblocking filter.
 * If |l4 - l5| < QP*1.25, distribute the step between lines 4 and 5
 * across lines 3..6: l4 += d/2, l5 -= d/2, l3 += d/8, l6 -= d/8,
 * where d = l5 - l4. The MMX2/3DNow path saturates correctly; the C
 * fallback can wrap around (see the header comment above).
 * @param src    top of the 8x16 block; stride*3 is added internally
 * @param stride line size in bytes
 * @param QP     quantizer (threshold is QP + QP/4)
 */
static inline void vertRK1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*3;
// FIXME rounding
asm volatile(
"pxor %%mm7, %%mm7 \n\t" // 0
"movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
"movq pQPb, %%mm0 \n\t" // QP,..., QP
"movq %%mm0, %%mm1 \n\t" // QP,..., QP
"paddusb b02, %%mm0 \n\t"
"psrlw $2, %%mm0 \n\t"
"pand b3F, %%mm0 \n\t" // QP/4,..., QP/4
"paddusb %%mm1, %%mm0 \n\t" // QP*1.25 ...
"movq (%0, %1, 4), %%mm2 \n\t" // line 4
"movq (%%ebx), %%mm3 \n\t" // line 5
"movq %%mm2, %%mm4 \n\t" // line 4
"pcmpeqb %%mm5, %%mm5 \n\t" // -1
"pxor %%mm2, %%mm5 \n\t" // -line 4 - 1
PAVGB(%%mm3, %%mm5)
"paddb %%mm6, %%mm5 \n\t" // (l5-l4)/2
"psubusb %%mm3, %%mm4 \n\t"
"psubusb %%mm2, %%mm3 \n\t"
"por %%mm3, %%mm4 \n\t" // |l4 - l5|
"psubusb %%mm0, %%mm4 \n\t"
"pcmpeqb %%mm7, %%mm4 \n\t" // |l4-l5| <= QP*1.25 -> FF (filter mask)
"pand %%mm4, %%mm5 \n\t" // d/2
// "paddb %%mm6, %%mm2 \n\t" // line 4 + 0x80
"paddb %%mm5, %%mm2 \n\t"
// "psubb %%mm6, %%mm2 \n\t"
"movq %%mm2, (%0,%1, 4) \n\t"
"movq (%%ebx), %%mm2 \n\t"
// "paddb %%mm6, %%mm2 \n\t" // line 5 + 0x80
"psubb %%mm5, %%mm2 \n\t"
// "psubb %%mm6, %%mm2 \n\t"
"movq %%mm2, (%%ebx) \n\t"
"paddb %%mm6, %%mm5 \n\t"
"psrlw $2, %%mm5 \n\t"
"pand b3F, %%mm5 \n\t"
"psubb b20, %%mm5 \n\t" // (l5-l4)/8
"movq (%%eax, %1, 2), %%mm2 \n\t"
"paddb %%mm6, %%mm2 \n\t" // line 3 + 0x80
"paddsb %%mm5, %%mm2 \n\t"
"psubb %%mm6, %%mm2 \n\t"
"movq %%mm2, (%%eax, %1, 2) \n\t"
"movq (%%ebx, %1), %%mm2 \n\t"
"paddb %%mm6, %%mm2 \n\t" // line 6 + 0x80
"psubsb %%mm5, %%mm2 \n\t"
"psubb %%mm6, %%mm2 \n\t"
"movq %%mm2, (%%ebx, %1) \n\t"
:
: "r" (src), "r" (stride)
: "%eax", "%ebx"
);
#else
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
// const int l7= stride + l6;
// const int l8= stride + l7;
// const int l9= stride + l8;
int x;
src+= stride*3;
for(x=0; x<BLOCK_SIZE; x++)
{
if(ABS(src[l4]-src[l5]) < QP + QP/4)
{
int v = (src[l5] - src[l4]);
// no clipping here — values may wrap (see function comment)
src[l3] +=v/8;
src[l4] +=v/2;
src[l5] -=v/2;
src[l6] -=v/8;
}
src++;
}
#endif
}
  622. /**
  623. * Experimental Filter 1
  624. * will not damage linear gradients
  625. * Flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
  626. * can only smooth blocks at the expected locations (it can't smooth them if they did move)
  627. * MMX2 version does correct clipping, the C version doesn't
  628. */
/**
 * Experimental vertical deblocking filter ("X1").
 * Measures the step across the block border, d = max(0, |l4-l5| -
 * (|l3-l4| + |l5-l6|)/2), so linear gradients (where the neighbor
 * differences match the border difference) are left untouched. If
 * d < QP the step is spread over lines 2..7 with weights
 * 1/8, 1/4, 3/8, 3/8, 1/4, 1/8 in the direction that flattens it.
 * MMX2/3DNow path clips correctly; the C fallback may wrap.
 * @param src    top of the 8x16 block; stride*3 is added internally
 * @param stride line size in bytes
 * @param QP     quantizer threshold on d
 */
static inline void vertX1Filter(uint8_t *src, int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*3;
asm volatile(
"pxor %%mm7, %%mm7 \n\t" // 0
// "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
"leal (%0, %1), %%eax \n\t"
"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
"movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
"movq (%0, %1, 4), %%mm1 \n\t" // line 4
"movq %%mm1, %%mm2 \n\t" // line 4
"psubusb %%mm0, %%mm1 \n\t"
"psubusb %%mm2, %%mm0 \n\t"
"por %%mm1, %%mm0 \n\t" // |l3 - l4|
"movq (%%ebx), %%mm3 \n\t" // line 5
"movq (%%ebx, %1), %%mm4 \n\t" // line 6
"movq %%mm3, %%mm5 \n\t" // line 5
"psubusb %%mm4, %%mm3 \n\t"
"psubusb %%mm5, %%mm4 \n\t"
"por %%mm4, %%mm3 \n\t" // |l5 - l6|
PAVGB(%%mm3, %%mm0) // (|l3 - l4| + |l5 - l6|)/2
"movq %%mm2, %%mm1 \n\t" // line 4
"psubusb %%mm5, %%mm2 \n\t"
"movq %%mm2, %%mm4 \n\t"
"pcmpeqb %%mm7, %%mm2 \n\t" // (l4 - l5) <= 0 ? -1 : 0
"psubusb %%mm1, %%mm5 \n\t"
"por %%mm5, %%mm4 \n\t" // |l4 - l5|
"psubusb %%mm0, %%mm4 \n\t" //d = MAX(0, |l4-l5| - (|l3-l4| + |l5-l6|)/2)
"movq %%mm4, %%mm3 \n\t" // d
"psubusb pQPb, %%mm4 \n\t"
"pcmpeqb %%mm7, %%mm4 \n\t" // d <= QP ? -1 : 0
"psubusb b01, %%mm3 \n\t"
"pand %%mm4, %%mm3 \n\t" // d <= QP ? d : 0
PAVGB(%%mm7, %%mm3) // d/2
"movq %%mm3, %%mm1 \n\t" // d/2
PAVGB(%%mm7, %%mm3) // d/4
PAVGB(%%mm1, %%mm3) // 3*d/8
// the pxor-with-sign-mask trick turns the unsigned saturated
// add/sub into a conditional one, depending on sign(l4 - l5)
"movq (%0, %1, 4), %%mm0 \n\t" // line 4
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
"psubusb %%mm3, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%0, %1, 4) \n\t" // line 4
"movq (%%ebx), %%mm0 \n\t" // line 5
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
"paddusb %%mm3, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%ebx) \n\t" // line 5
PAVGB(%%mm7, %%mm1) // d/4
"movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
"psubusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%eax, %1, 2) \n\t" // line 3
"movq (%%ebx, %1), %%mm0 \n\t" // line 6
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
"paddusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%ebx, %1) \n\t" // line 6
PAVGB(%%mm7, %%mm1) // d/8
"movq (%%eax, %1), %%mm0 \n\t" // line 2
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
"psubusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%eax, %1) \n\t" // line 2
"movq (%%ebx, %1, 2), %%mm0 \n\t" // line 7
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
"paddusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%%ebx, %1, 2) \n\t" // line 7
:
: "r" (src), "r" (stride)
: "%eax", "%ebx"
);
#else
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
// const int l8= stride + l7;
// const int l9= stride + l8;
int x;
src+= stride*3;
for(x=0; x<BLOCK_SIZE; x++)
{
int a= src[l3] - src[l4];
int b= src[l4] - src[l5];
int c= src[l5] - src[l6];
int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
if(d < QP)
{
int v = d * SIGN(-b);
// no clipping here — values may wrap (see function comment)
src[l2] +=v/8;
src[l3] +=v/4;
src[l4] +=3*v/8;
src[l5] -=3*v/8;
src[l6] -=v/4;
src[l7] -=v/8;
}
src++;
}
/*
const int l1= stride;
const int l2= stride + l1;
const int l3= stride + l2;
const int l4= stride + l3;
const int l5= stride + l4;
const int l6= stride + l5;
const int l7= stride + l6;
const int l8= stride + l7;
const int l9= stride + l8;
for(int x=0; x<BLOCK_SIZE; x++)
{
int v2= src[l2];
int v3= src[l3];
int v4= src[l4];
int v5= src[l5];
int v6= src[l6];
int v7= src[l7];
if(ABS(v4-v5)<QP && ABS(v4-v5) - (ABS(v3-v4) + ABS(v5-v6))>0 )
{
src[l3] = (6*v2 + 4*v3 + 3*v4 + 2*v5 + v6 )/16;
src[l4] = (3*v2 + 3*v3 + 4*v4 + 3*v5 + 2*v6 + v7 )/16;
src[l5] = (1*v2 + 2*v3 + 3*v4 + 4*v5 + 3*v6 + 3*v7)/16;
src[l6] = ( 1*v3 + 2*v4 + 3*v5 + 4*v6 + 6*v7)/16;
}
src++;
}
*/
#endif
}
  765. /**
  766. * Experimental Filter 1 (Horizontal)
  767. * will not damage linear gradients
  768. * Flat blocks should look like they were passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
  769. * can only smooth blocks at the expected locations (it can't smooth them if they did move)
  770. * MMX2 version does correct clipping, the C version doesn't
  771. * not identical with the vertical one
  772. */
  773. static inline void horizX1Filter(uint8_t *src, int stride, int QP)
  774. {
  775. int y;
  776. static uint64_t *lut= NULL;
  777. if(lut==NULL)
  778. {
  779. int i;
  780. lut= (uint64_t*)memalign(8, 256*8);
  781. for(i=0; i<256; i++)
  782. {
  783. int v= i < 128 ? 2*i : 2*(i-256);
  784. /*
  785. //Simulate 112242211 9-Tap filter
  786. uint64_t a= (v/16) & 0xFF;
  787. uint64_t b= (v/8) & 0xFF;
  788. uint64_t c= (v/4) & 0xFF;
  789. uint64_t d= (3*v/8) & 0xFF;
  790. */
  791. //Simulate piecewise linear interpolation
  792. uint64_t a= (v/16) & 0xFF;
  793. uint64_t b= (v*3/16) & 0xFF;
  794. uint64_t c= (v*5/16) & 0xFF;
  795. uint64_t d= (7*v/16) & 0xFF;
  796. uint64_t A= (0x100 - a)&0xFF;
  797. uint64_t B= (0x100 - b)&0xFF;
  798. uint64_t C= (0x100 - c)&0xFF;
  799. uint64_t D= (0x100 - c)&0xFF;
  800. lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
  801. (D<<24) | (C<<16) | (B<<8) | (A);
  802. //lut[i] = (v<<32) | (v<<24);
  803. }
  804. }
  805. #if 0
  806. asm volatile(
  807. "pxor %%mm7, %%mm7 \n\t" // 0
  808. // "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
  809. "leal (%0, %1), %%eax \n\t"
  810. "leal (%%eax, %1, 4), %%ebx \n\t"
  811. "movq b80, %%mm6 \n\t"
  812. "movd pQPb, %%mm5 \n\t" // QP
  813. "movq %%mm5, %%mm4 \n\t"
  814. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  815. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  816. "pxor %%mm5, %%mm5 \n\t" // 0
  817. "psubb %%mm4, %%mm5 \n\t" // -3QP
  818. "por bm11111110, %%mm5 \n\t" // ...,FF,FF,-3QP
  819. "psllq $24, %%mm5 \n\t"
  820. // 0 1 2 3 4 5 6 7 8 9
  821. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  822. #define HX1old(a) \
  823. "movd " #a ", %%mm0 \n\t"\
  824. "movd 4" #a ", %%mm1 \n\t"\
  825. "punpckldq %%mm1, %%mm0 \n\t"\
  826. "movq %%mm0, %%mm1 \n\t"\
  827. "movq %%mm0, %%mm2 \n\t"\
  828. "psrlq $8, %%mm1 \n\t"\
  829. "psubusb %%mm1, %%mm2 \n\t"\
  830. "psubusb %%mm0, %%mm1 \n\t"\
  831. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  832. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  833. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  834. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  835. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  836. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  837. "paddb %%mm5, %%mm1 \n\t"\
  838. "psubusb %%mm5, %%mm1 \n\t"\
  839. PAVGB(%%mm7, %%mm1)\
  840. "pxor %%mm2, %%mm1 \n\t"\
  841. "psubb %%mm2, %%mm1 \n\t"\
  842. "psrlq $24, %%mm1 \n\t"\
  843. "movd %%mm1, %%ecx \n\t"\
  844. "paddb %%mm6, %%mm0 \n\t"\
  845. "paddsb (%3, %%ecx, 8), %%mm0 \n\t"\
  846. "paddb %%mm6, %%mm0 \n\t"\
  847. "movq %%mm0, " #a " \n\t"\
  848. /*
  849. HX1old((%0))
  850. HX1old((%%eax))
  851. HX1old((%%eax, %1))
  852. HX1old((%%eax, %1, 2))
  853. HX1old((%0, %1, 4))
  854. HX1old((%%ebx))
  855. HX1old((%%ebx, %1))
  856. HX1old((%%ebx, %1, 2))
  857. */
  858. //FIXME add some comments, its unreadable ...
  859. #define HX1b(a, c, b, d) \
  860. "movd " #a ", %%mm0 \n\t"\
  861. "movd 4" #a ", %%mm1 \n\t"\
  862. "punpckldq %%mm1, %%mm0 \n\t"\
  863. "movd " #b ", %%mm4 \n\t"\
  864. "movq %%mm0, %%mm1 \n\t"\
  865. "movq %%mm0, %%mm2 \n\t"\
  866. "psrlq $8, %%mm1 \n\t"\
  867. "movd 4" #b ", %%mm3 \n\t"\
  868. "psubusb %%mm1, %%mm2 \n\t"\
  869. "psubusb %%mm0, %%mm1 \n\t"\
  870. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  871. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  872. "punpckldq %%mm3, %%mm4 \n\t"\
  873. "movq %%mm1, %%mm3 \n\t"\
  874. "psllq $32, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  875. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  876. "paddb %%mm6, %%mm0 \n\t"\
  877. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  878. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  879. "movq %%mm4, %%mm3 \n\t"\
  880. "paddb %%mm5, %%mm1 \n\t"\
  881. "psubusb %%mm5, %%mm1 \n\t"\
  882. "psrlq $8, %%mm3 \n\t"\
  883. PAVGB(%%mm7, %%mm1)\
  884. "pxor %%mm2, %%mm1 \n\t"\
  885. "psubb %%mm2, %%mm1 \n\t"\
  886. "movq %%mm4, %%mm2 \n\t"\
  887. "psrlq $24, %%mm1 \n\t"\
  888. "psubusb %%mm3, %%mm2 \n\t"\
  889. "movd %%mm1, %%ecx \n\t"\
  890. "psubusb %%mm4, %%mm3 \n\t"\
  891. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  892. "por %%mm2, %%mm3 \n\t" /* p´x = |px - p(x+1)| */\
  893. "paddb %%mm6, %%mm0 \n\t"\
  894. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  895. "movq %%mm3, %%mm1 \n\t"\
  896. "psllq $32, %%mm1 \n\t" /* p´5 = |p1 - p2| */\
  897. "movq %%mm0, " #a " \n\t"\
  898. PAVGB(%%mm3, %%mm1) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  899. "paddb %%mm6, %%mm4 \n\t"\
  900. "psrlq $16, %%mm1 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  901. "psubusb %%mm1, %%mm3 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  902. "paddb %%mm5, %%mm3 \n\t"\
  903. "psubusb %%mm5, %%mm3 \n\t"\
  904. PAVGB(%%mm7, %%mm3)\
  905. "pxor %%mm2, %%mm3 \n\t"\
  906. "psubb %%mm2, %%mm3 \n\t"\
  907. "psrlq $24, %%mm3 \n\t"\
  908. "movd " #c ", %%mm0 \n\t"\
  909. "movd 4" #c ", %%mm1 \n\t"\
  910. "punpckldq %%mm1, %%mm0 \n\t"\
  911. "paddb %%mm6, %%mm0 \n\t"\
  912. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  913. "paddb %%mm6, %%mm0 \n\t"\
  914. "movq %%mm0, " #c " \n\t"\
  915. "movd %%mm3, %%ecx \n\t"\
  916. "movd " #d ", %%mm0 \n\t"\
  917. "paddsb (%2, %%ecx, 8), %%mm4 \n\t"\
  918. "movd 4" #d ", %%mm1 \n\t"\
  919. "paddb %%mm6, %%mm4 \n\t"\
  920. "punpckldq %%mm1, %%mm0 \n\t"\
  921. "movq %%mm4, " #b " \n\t"\
  922. "paddb %%mm6, %%mm0 \n\t"\
  923. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  924. "paddb %%mm6, %%mm0 \n\t"\
  925. "movq %%mm0, " #d " \n\t"\
  926. HX1b((%0),(%%eax),(%%eax, %1),(%%eax, %1, 2))
  927. HX1b((%0, %1, 4),(%%ebx),(%%ebx, %1),(%%ebx, %1, 2))
  928. :
  929. : "r" (src), "r" (stride), "r" (lut)
  930. : "%eax", "%ebx", "%ecx"
  931. );
  932. #else
  933. //FIXME (has little in common with the mmx2 version)
  934. for(y=0; y<BLOCK_SIZE; y++)
  935. {
  936. int a= src[1] - src[2];
  937. int b= src[3] - src[4];
  938. int c= src[5] - src[6];
  939. int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
  940. if(d < QP)
  941. {
  942. int v = d * SIGN(-b);
  943. src[1] +=v/8;
  944. src[2] +=v/4;
  945. src[3] +=3*v/8;
  946. src[4] -=3*v/8;
  947. src[5] -=v/4;
  948. src[6] -=v/8;
  949. }
  950. src+=stride;
  951. }
  952. #endif
  953. }
/**
 * Default (weak) vertical deblocking filter: smooths across the horizontal
 * edge between two vertically adjacent 8x8 blocks.
 *
 * The two rows next to the edge are corrected by a clipped delta when the
 * "middle energy" 5*(l5-l4) + 2*(l3-l6) is below 8*QP; the delta is reduced
 * by the smaller of the left/right neighbourhood energies and clipped to at
 * most half the edge step, so real image edges are preserved.
 *
 * src    points 3 rows (C path) resp. 4 rows (MMX path, adjusted below)
 *        above the block boundary being filtered
 * stride distance in bytes between vertically adjacent pixels
 * QP     quantization parameter controlling the filter strength
 *
 * NOTE(review): temp0..temp3 (MMX scratch) and w05/w20 (word constants 5/32)
 * are file-scope variables defined elsewhere in this file — not visible in
 * this chunk; confirm before reusing this block standalone.
 */
static inline void doVertDefFilter(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
	src+= stride*4;
	//FIXME try pmul for *5 stuff
//	src[0]=0;
	asm volatile(
		"pxor %%mm7, %%mm7 \n\t"
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ebx+%1 ebx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1
		// rows are unpacked to words: Lx = low 4 pixels, Hx = high 4 pixels of row x
		"movq (%0), %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
		"punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
		"movq (%%eax), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
		"punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
		"movq (%%eax, %1), %%mm4 \n\t"
		"movq %%mm4, %%mm5 \n\t"
		"punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
		"punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
		"paddw %%mm0, %%mm0 \n\t" // 2L0
		"paddw %%mm1, %%mm1 \n\t" // 2H0
		"psubw %%mm4, %%mm2 \n\t" // L1 - L2
		"psubw %%mm5, %%mm3 \n\t" // H1 - H2
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2
		"psllw $2, %%mm2 \n\t" // 4L1 - 4L2
		"psllw $2, %%mm3 \n\t" // 4H1 - 4H2
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
		"movq (%%eax, %1, 2), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // L3
		"punpckhbw %%mm7, %%mm3 \n\t" // H3
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
		"movq %%mm0, temp0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq %%mm1, temp1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
		"movq (%0, %1, 4), %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"punpcklbw %%mm7, %%mm0 \n\t" // L4
		"punpckhbw %%mm7, %%mm1 \n\t" // H4
		"psubw %%mm0, %%mm2 \n\t" // L3 - L4
		"psubw %%mm1, %%mm3 \n\t" // H3 - H4
		"movq %%mm2, temp2 \n\t" // L3 - L4
		"movq %%mm3, temp3 \n\t" // H3 - H4
		"paddw %%mm4, %%mm4 \n\t" // 2L2
		"paddw %%mm5, %%mm5 \n\t" // 2H2
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
		"psllw $2, %%mm2 \n\t" // 4L3 - 4L4
		"psllw $2, %%mm3 \n\t" // 4H3 - 4H4
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
//50 opcodes so far
		"movq (%%ebx), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // L5
		"punpckhbw %%mm7, %%mm3 \n\t" // H5
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
		"movq (%%ebx, %1), %%mm6 \n\t"
		"punpcklbw %%mm7, %%mm6 \n\t" // L6
		"psubw %%mm6, %%mm2 \n\t" // L5 - L6
		"movq (%%ebx, %1), %%mm6 \n\t"
		"punpckhbw %%mm7, %%mm6 \n\t" // H6
		"psubw %%mm6, %%mm3 \n\t" // H5 - H6
		"paddw %%mm0, %%mm0 \n\t" // 2L4
		"paddw %%mm1, %%mm1 \n\t" // 2H4
		"psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
		"psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6
		"psllw $2, %%mm2 \n\t" // 4L5 - 4L6
		"psllw $2, %%mm3 \n\t" // 4H5 - 4H6
		"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
		"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6
		"movq (%%ebx, %1, 2), %%mm2 \n\t"
		"movq %%mm2, %%mm3 \n\t"
		"punpcklbw %%mm7, %%mm2 \n\t" // L7
		"punpckhbw %%mm7, %%mm3 \n\t" // H7
		"paddw %%mm2, %%mm2 \n\t" // 2L7
		"paddw %%mm3, %%mm3 \n\t" // 2H7
		"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
		"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
		"movq temp0, %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq temp1, %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
//FIXME pxor, psubw, pmax for abs
		// absolute values via sign-mask (pcmpgtw), then min of left/right energy
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm0, %%mm6 \n\t"
		"pxor %%mm6, %%mm0 \n\t"
		"psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm1, %%mm6 \n\t"
		"pxor %%mm6, %%mm1 \n\t"
		"psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm2, %%mm6 \n\t"
		"pxor %%mm6, %%mm2 \n\t"
		"psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm3, %%mm6 \n\t"
		"pxor %%mm6, %%mm3 \n\t"
		"psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#ifdef HAVE_MMX2
		"pminsw %%mm2, %%mm0 \n\t"
		"pminsw %%mm3, %%mm1 \n\t"
#else
		"movq %%mm0, %%mm6 \n\t"
		"psubusw %%mm2, %%mm6 \n\t"
		"psubw %%mm6, %%mm0 \n\t"
		"movq %%mm1, %%mm6 \n\t"
		"psubusw %%mm3, %%mm6 \n\t"
		"psubw %%mm6, %%mm1 \n\t"
#endif
		"movq %%mm7, %%mm6 \n\t" // 0
		"pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
		"pxor %%mm6, %%mm4 \n\t"
		"psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
		"pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
		"pxor %%mm7, %%mm5 \n\t"
		"psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes
		"movd %2, %%mm2 \n\t" // QP
		"punpcklwd %%mm2, %%mm2 \n\t"
		"punpcklwd %%mm2, %%mm2 \n\t"
		"psllw $3, %%mm2 \n\t" // 8QP
		"movq %%mm2, %%mm3 \n\t" // 8QP
		"pcmpgtw %%mm4, %%mm2 \n\t"
		"pcmpgtw %%mm5, %%mm3 \n\t"
		"pand %%mm2, %%mm4 \n\t"
		"pand %%mm3, %%mm5 \n\t"
		"psubusw %%mm0, %%mm4 \n\t" // hd
		"psubusw %%mm1, %%mm5 \n\t" // ld
		"movq w05, %%mm2 \n\t" // 5
		"pmullw %%mm2, %%mm4 \n\t"
		"pmullw %%mm2, %%mm5 \n\t"
		"movq w20, %%mm2 \n\t" // 32
		"paddw %%mm2, %%mm4 \n\t"
		"paddw %%mm2, %%mm5 \n\t"
		"psrlw $6, %%mm4 \n\t"
		"psrlw $6, %%mm5 \n\t"
/*
		"movq w06, %%mm2 \n\t" // 6
		"paddw %%mm2, %%mm4 \n\t"
		"paddw %%mm2, %%mm5 \n\t"
		"movq w1400, %%mm2 \n\t" // 1400h = 5120 = 5/64*2^16
//FIXME if *5/64 is supposed to be /13 then we should use 5041 instead of 5120
		"pmulhw %%mm2, %%mm4 \n\t" // hd/13
		"pmulhw %%mm2, %%mm5 \n\t" // ld/13
*/
		// clip the correction to |L3-L4|/2 resp. |H3-H4|/2 and apply with
		// the sign of -(middle energy)
		"movq temp2, %%mm0 \n\t" // L3 - L4
		"movq temp3, %%mm1 \n\t" // H3 - H4
		"pxor %%mm2, %%mm2 \n\t"
		"pxor %%mm3, %%mm3 \n\t"
		"pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
		"pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
		"pxor %%mm2, %%mm0 \n\t"
		"pxor %%mm3, %%mm1 \n\t"
		"psubw %%mm2, %%mm0 \n\t" // |L3-L4|
		"psubw %%mm3, %%mm1 \n\t" // |H3-H4|
		"psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
		"psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
		"pxor %%mm6, %%mm2 \n\t"
		"pxor %%mm7, %%mm3 \n\t"
		"pand %%mm2, %%mm4 \n\t"
		"pand %%mm3, %%mm5 \n\t"
#ifdef HAVE_MMX2
		"pminsw %%mm0, %%mm4 \n\t"
		"pminsw %%mm1, %%mm5 \n\t"
#else
		"movq %%mm4, %%mm2 \n\t"
		"psubusw %%mm0, %%mm2 \n\t"
		"psubw %%mm2, %%mm4 \n\t"
		"movq %%mm5, %%mm2 \n\t"
		"psubusw %%mm1, %%mm2 \n\t"
		"psubw %%mm2, %%mm5 \n\t"
#endif
		"pxor %%mm6, %%mm4 \n\t"
		"pxor %%mm7, %%mm5 \n\t"
		"psubw %%mm6, %%mm4 \n\t"
		"psubw %%mm7, %%mm5 \n\t"
		"packsswb %%mm5, %%mm4 \n\t"
		"movq (%%eax, %1, 2), %%mm0 \n\t"
		"paddb %%mm4, %%mm0 \n\t"
		"movq %%mm0, (%%eax, %1, 2) \n\t"
		"movq (%0, %1, 4), %%mm0 \n\t"
		"psubb %%mm4, %%mm0 \n\t"
		"movq %%mm0, (%0, %1, 4) \n\t"
		:
		: "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
	// row offsets relative to src (src starts stride*3 above the edge,
	// so l4/l5 are the two rows adjacent to the block boundary)
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
//	const int l9= stride + l8;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
		if(ABS(middleEnergy) < 8*QP)
		{
			// half the step across the edge; the correction never exceeds it
			const int q=(src[l4] - src[l5])/2;
			const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
			const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
			d= MAX(d, 0);
			d= (5*d + 32) >> 6;
			d*= SIGN(-middleEnergy);
			// clip d to [0, q] or [q, 0] depending on the edge direction
			if(q>0)
			{
				d= d<0 ? 0 : d;
				d= d>q ? q : d;
			}
			else
			{
				d= d>0 ? 0 : d;
				d= d<q ? q : d;
			}
			src[l4]-= d;
			src[l5]+= d;
		}
		src++;
	}
#endif
}
  1195. //FIXME? |255-0| = 1
  1196. /**
  1197. * Check if the given 8x8 Block is mostly "flat"
  1198. */
static inline int isHorizDC(uint8_t src[], int stride)
{
//	src++;
	// number of horizontally adjacent pixel pairs (7 per row, 8 rows)
	// whose difference is at most 1
	int numEq= 0;
#if 0
	asm volatile (
//		"int $3 \n\t"
		"leal (%1, %2), %%ecx \n\t"
		"leal (%%ecx, %2, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %1 ecx ecx+%2 ecx+2%2 %1+4%2 ebx ebx+%2 ebx+2%2 %1+8%2 ebx+4%2
		"movq b7E, %%mm7 \n\t" // mm7 = 0x7F
		"movq b7C, %%mm6 \n\t" // mm6 = 0x7D
		"pxor %%mm0, %%mm0 \n\t"
		"movl %1, %%eax \n\t"
		"andl $0x1F, %%eax \n\t"
		"cmpl $24, %%eax \n\t"
		"leal tempBlock, %%eax \n\t"
		"jb 1f \n\t"
#define HDC_CHECK_AND_CPY(src, dst) \
	"movd " #src ", %%mm2 \n\t"\
	"punpckldq 4" #src ", %%mm2 \n\t" /* (%1) */\
	"movq %%mm2, %%mm1 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"psubb %%mm1, %%mm2 \n\t"\
	"paddb %%mm7, %%mm2 \n\t"\
	"pcmpgtb %%mm6, %%mm2 \n\t"\
	"paddb %%mm2, %%mm0 \n\t"\
	"movq %%mm1," #dst "(%%eax) \n\t"
		HDC_CHECK_AND_CPY((%1),0)
		HDC_CHECK_AND_CPY((%%ecx),8)
		HDC_CHECK_AND_CPY((%%ecx, %2),16)
		HDC_CHECK_AND_CPY((%%ecx, %2, 2),24)
		HDC_CHECK_AND_CPY((%1, %2, 4),32)
		HDC_CHECK_AND_CPY((%%ebx),40)
		HDC_CHECK_AND_CPY((%%ebx, %2),48)
		HDC_CHECK_AND_CPY((%%ebx, %2, 2),56)
		"jmp 2f \n\t"
		"1: \n\t"
// src does not cross a 32 byte cache line so dont waste time with alignment
#define HDC_CHECK_AND_CPY2(src, dst) \
	"movq " #src ", %%mm2 \n\t"\
	"movq " #src ", %%mm1 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"psubb %%mm1, %%mm2 \n\t"\
	"paddb %%mm7, %%mm2 \n\t"\
	"pcmpgtb %%mm6, %%mm2 \n\t"\
	"paddb %%mm2, %%mm0 \n\t"\
	"movq %%mm1," #dst "(%%eax) \n\t"
		HDC_CHECK_AND_CPY2((%1),0)
		HDC_CHECK_AND_CPY2((%%ecx),8)
		HDC_CHECK_AND_CPY2((%%ecx, %2),16)
		HDC_CHECK_AND_CPY2((%%ecx, %2, 2),24)
		HDC_CHECK_AND_CPY2((%1, %2, 4),32)
		HDC_CHECK_AND_CPY2((%%ebx),40)
		HDC_CHECK_AND_CPY2((%%ebx, %2),48)
		HDC_CHECK_AND_CPY2((%%ebx, %2, 2),56)
		"2: \n\t"
		"psllq $8, %%mm0 \n\t" // remove dummy value
		"movq %%mm0, %%mm1 \n\t"
		"psrlw $8, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $16, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movq %%mm0, %%mm1 \n\t"
		"psrlq $32, %%mm0 \n\t"
		"paddb %%mm1, %%mm0 \n\t"
		"movd %%mm0, %0 \n\t"
		: "=r" (numEq)
		: "r" (src), "r" (stride)
		: "%eax", "%ebx", "%ecx"
	);
//	printf("%d\n", numEq);
	numEq= (256 - numEq) &0xFF;
#else
	int y;
	for(y=0; y<BLOCK_SIZE; y++)
	{
		/* ((a - b + 1) & 0xFFFF) < 3  <=>  |a - b| <= 1 for 8-bit pixels:
		   a negative difference wraps to a large unsigned value and fails */
		if(((src[0] - src[1] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[1] - src[2] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[2] - src[3] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[3] - src[4] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[4] - src[5] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[5] - src[6] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[6] - src[7] + 1) & 0xFFFF) < 3) numEq++;
		src+= stride;
	}
#endif
/*	if(abs(numEq - asmEq) > 0)
	{
//		printf("\nasm:%d c:%d\n", asmEq, numEq);
		for(int y=0; y<8; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", src[x + y*stride]);
			}
			printf("\n");
		}
	}
*/
//	printf("%d\n", numEq);
	// hFlatnessThreshold is a file-scope tuning constant (defined elsewhere
	// in this file); block counts as "flat"/DC when enough pairs are equal
	return numEq > hFlatnessThreshold;
}
  1304. static inline int isHorizMinMaxOk(uint8_t src[], int stride, int QP)
  1305. {
  1306. if(abs(src[0] - src[7]) > 2*QP) return 0;
  1307. return 1;
  1308. }
  1309. static inline void doHorizDefFilter(uint8_t dst[], int stride, int QP)
  1310. {
  1311. #if 0
  1312. asm volatile(
  1313. "leal (%0, %1), %%ecx \n\t"
  1314. "leal (%%ecx, %1, 4), %%ebx \n\t"
  1315. // 0 1 2 3 4 5 6 7 8 9
  1316. // %0 ecx ecx+%1 ecx+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1317. "pxor %%mm7, %%mm7 \n\t"
  1318. "movq bm00001000, %%mm6 \n\t"
  1319. "movd %2, %%mm5 \n\t" // QP
  1320. "movq %%mm5, %%mm4 \n\t"
  1321. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  1322. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  1323. "psllq $24, %%mm4 \n\t"
  1324. "pxor %%mm5, %%mm5 \n\t" // 0
  1325. "psubb %%mm4, %%mm5 \n\t" // -QP
  1326. "leal tempBlock, %%eax \n\t"
  1327. //FIXME? "unroll by 2" and mix
  1328. #ifdef HAVE_MMX2
  1329. #define HDF(src, dst) \
  1330. "movq " #src "(%%eax), %%mm0 \n\t"\
  1331. "movq " #src "(%%eax), %%mm1 \n\t"\
  1332. "movq " #src "(%%eax), %%mm2 \n\t"\
  1333. "psrlq $8, %%mm1 \n\t"\
  1334. "psubusb %%mm1, %%mm2 \n\t"\
  1335. "psubusb %%mm0, %%mm1 \n\t"\
  1336. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1337. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1338. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  1339. "pminub %%mm1, %%mm3 \n\t" /* p´5 = min(|p2-p1|, |p6-p5|)*/\
  1340. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1341. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5-p6|) */\
  1342. "paddb %%mm5, %%mm1 \n\t"\
  1343. "psubusb %%mm5, %%mm1 \n\t"\
  1344. "psrlw $2, %%mm1 \n\t"\
  1345. "pxor %%mm2, %%mm1 \n\t"\
  1346. "psubb %%mm2, %%mm1 \n\t"\
  1347. "pand %%mm6, %%mm1 \n\t"\
  1348. "psubb %%mm1, %%mm0 \n\t"\
  1349. "psllq $8, %%mm1 \n\t"\
  1350. "paddb %%mm1, %%mm0 \n\t"\
  1351. "movd %%mm0, " #dst" \n\t"\
  1352. "psrlq $32, %%mm0 \n\t"\
  1353. "movd %%mm0, 4" #dst" \n\t"
  1354. #else
  1355. #define HDF(src, dst)\
  1356. "movq " #src "(%%eax), %%mm0 \n\t"\
  1357. "movq %%mm0, %%mm1 \n\t"\
  1358. "movq %%mm0, %%mm2 \n\t"\
  1359. "psrlq $8, %%mm1 \n\t"\
  1360. "psubusb %%mm1, %%mm2 \n\t"\
  1361. "psubusb %%mm0, %%mm1 \n\t"\
  1362. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1363. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1364. "movq %%mm1, %%mm3 \n\t"\
  1365. "psllq $32, %%mm3 \n\t"\
  1366. "movq %%mm3, %%mm4 \n\t"\
  1367. "psubusb %%mm1, %%mm4 \n\t"\
  1368. "psubb %%mm4, %%mm3 \n\t"\
  1369. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1370. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5,ü6|) */\
  1371. "paddb %%mm5, %%mm1 \n\t"\
  1372. "psubusb %%mm5, %%mm1 \n\t"\
  1373. "psrlw $2, %%mm1 \n\t"\
  1374. "pxor %%mm2, %%mm1 \n\t"\
  1375. "psubb %%mm2, %%mm1 \n\t"\
  1376. "pand %%mm6, %%mm1 \n\t"\
  1377. "psubb %%mm1, %%mm0 \n\t"\
  1378. "psllq $8, %%mm1 \n\t"\
  1379. "paddb %%mm1, %%mm0 \n\t"\
  1380. "movd %%mm0, " #dst " \n\t"\
  1381. "psrlq $32, %%mm0 \n\t"\
  1382. "movd %%mm0, 4" #dst " \n\t"
  1383. #endif
  1384. HDF(0,(%0))
  1385. HDF(8,(%%ecx))
  1386. HDF(16,(%%ecx, %1))
  1387. HDF(24,(%%ecx, %1, 2))
  1388. HDF(32,(%0, %1, 4))
  1389. HDF(40,(%%ebx))
  1390. HDF(48,(%%ebx, %1))
  1391. HDF(56,(%%ebx, %1, 2))
  1392. :
  1393. : "r" (dst), "r" (stride), "r" (QP)
  1394. : "%eax", "%ebx", "%ecx"
  1395. );
  1396. #else
  1397. int y;
  1398. for(y=0; y<BLOCK_SIZE; y++)
  1399. {
  1400. const int middleEnergy= 5*(dst[4] - dst[5]) + 2*(dst[2] - dst[5]);
  1401. if(ABS(middleEnergy) < 8*QP)
  1402. {
  1403. const int q=(dst[3] - dst[4])/2;
  1404. const int leftEnergy= 5*(dst[2] - dst[1]) + 2*(dst[0] - dst[3]);
  1405. const int rightEnergy= 5*(dst[6] - dst[5]) + 2*(dst[4] - dst[7]);
  1406. int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
  1407. d= MAX(d, 0);
  1408. d= (5*d + 32) >> 6;
  1409. d*= SIGN(-middleEnergy);
  1410. if(q>0)
  1411. {
  1412. d= d<0 ? 0 : d;
  1413. d= d>q ? q : d;
  1414. }
  1415. else
  1416. {
  1417. d= d>0 ? 0 : d;
  1418. d= d<q ? q : d;
  1419. }
  1420. dst[3]-= d;
  1421. dst[4]+= d;
  1422. }
  1423. dst+= stride;
  1424. }
  1425. #endif
  1426. }
  1427. /**
  1428. * Do a horizontal low pass filter on the 10x8 block (dst points to middle 8x8 Block)
  1429. * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
  1430. * using the 7-Tap Filter (2,2,2,4,2,2,2)/16 (MMX2/3DNOW version)
  1431. */
static inline void doHorizLowPass(uint8_t dst[], int stride, int QP)
{
	// NOTE(review): the inline-asm path is disabled (#if 0); only the C
	// path at the bottom is active. tempBlock and the bm*/PAVGB helpers
	// referenced by the asm are defined elsewhere in this file.
#if 0
	asm volatile(
		"leal (%0, %1), %%ecx \n\t"
		"leal (%%ecx, %1, 4), %%ebx \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 ecx ecx+%1 ecx+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
		"pxor %%mm7, %%mm7 \n\t"
		"leal tempBlock, %%eax \n\t"
/*
#define HLP1 "movq (%0), %%mm0 \n\t"\
	"movq %%mm0, %%mm1 \n\t"\
	"psllq $8, %%mm0 \n\t"\
	PAVGB(%%mm1, %%mm0)\
	"psrlw $8, %%mm0 \n\t"\
	"pxor %%mm1, %%mm1 \n\t"\
	"packuswb %%mm1, %%mm0 \n\t"\
	"movq %%mm0, %%mm1 \n\t"\
	"movq %%mm0, %%mm2 \n\t"\
	"psllq $32, %%mm0 \n\t"\
	"paddb %%mm0, %%mm1 \n\t"\
	"psllq $16, %%mm2 \n\t"\
	PAVGB(%%mm2, %%mm0)\
	"movq %%mm0, %%mm3 \n\t"\
	"pand bm11001100, %%mm0 \n\t"\
	"paddusb %%mm0, %%mm3 \n\t"\
	"psrlq $8, %%mm3 \n\t"\
	PAVGB(%%mm1, %%mm4)\
	PAVGB(%%mm3, %%mm2)\
	"psrlq $16, %%mm2 \n\t"\
	"punpcklbw %%mm2, %%mm2 \n\t"\
	"movq %%mm2, (%0) \n\t"\

#define HLP2 "movq (%0), %%mm0 \n\t"\
	"movq %%mm0, %%mm1 \n\t"\
	"psllq $8, %%mm0 \n\t"\
	PAVGB(%%mm1, %%mm0)\
	"psrlw $8, %%mm0 \n\t"\
	"pxor %%mm1, %%mm1 \n\t"\
	"packuswb %%mm1, %%mm0 \n\t"\
	"movq %%mm0, %%mm2 \n\t"\
	"psllq $32, %%mm0 \n\t"\
	"psllq $16, %%mm2 \n\t"\
	PAVGB(%%mm2, %%mm0)\
	"movq %%mm0, %%mm3 \n\t"\
	"pand bm11001100, %%mm0 \n\t"\
	"paddusb %%mm0, %%mm3 \n\t"\
	"psrlq $8, %%mm3 \n\t"\
	PAVGB(%%mm3, %%mm2)\
	"psrlq $16, %%mm2 \n\t"\
	"punpcklbw %%mm2, %%mm2 \n\t"\
	"movq %%mm2, (%0) \n\t"\
*/
// approximately a 7-Tap Filter with Vector (1,2,3,4,3,2,1)/16
/*
Implemented Exact 7-Tap
 9421 A321
 36421 64321
 334321 =
 1234321 =
 1234321 =
 123433 =
 12463 12346
 1249 123A
*/
#ifdef HAVE_MMX2
#define HLP3(i) "movq " #i "(%%eax), %%mm0 \n\t"\
	"movq %%mm0, %%mm1 \n\t"\
	"movq %%mm0, %%mm2 \n\t"\
	"movq %%mm0, %%mm3 \n\t"\
	"movq %%mm0, %%mm4 \n\t"\
	"psllq $8, %%mm1 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"pand bm00000001, %%mm3 \n\t"\
	"pand bm10000000, %%mm4 \n\t"\
	"por %%mm3, %%mm1 \n\t"\
	"por %%mm4, %%mm2 \n\t"\
	PAVGB(%%mm2, %%mm1)\
	PAVGB(%%mm1, %%mm0)\
\
	"pshufw $0xF9, %%mm0, %%mm3 \n\t"\
	"pshufw $0x90, %%mm0, %%mm4 \n\t"\
	PAVGB(%%mm3, %%mm4)\
	PAVGB(%%mm4, %%mm0)\
	"movd %%mm0, (%0) \n\t"\
	"psrlq $32, %%mm0 \n\t"\
	"movd %%mm0, 4(%0) \n\t"
#else
#define HLP3(i) "movq " #i "(%%eax), %%mm0 \n\t"\
	"movq %%mm0, %%mm1 \n\t"\
	"movq %%mm0, %%mm2 \n\t"\
	"movd -4(%0), %%mm3 \n\t" /*0001000*/\
	"movd 8(%0), %%mm4 \n\t" /*0001000*/\
	"psllq $8, %%mm1 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"psrlq $24, %%mm3 \n\t"\
	"psllq $56, %%mm4 \n\t"\
	"por %%mm3, %%mm1 \n\t"\
	"por %%mm4, %%mm2 \n\t"\
	PAVGB(%%mm2, %%mm1)\
	PAVGB(%%mm1, %%mm0)\
\
	"movq %%mm0, %%mm3 \n\t"\
	"movq %%mm0, %%mm4 \n\t"\
	"movq %%mm0, %%mm5 \n\t"\
	"psrlq $16, %%mm3 \n\t"\
	"psllq $16, %%mm4 \n\t"\
	"pand bm11000000, %%mm5 \n\t"\
	"por %%mm5, %%mm3 \n\t"\
	"movq %%mm0, %%mm5 \n\t"\
	"pand bm00000011, %%mm5 \n\t"\
	"por %%mm5, %%mm4 \n\t"\
	PAVGB(%%mm3, %%mm4)\
	PAVGB(%%mm4, %%mm0)\
	"movd %%mm0, (%0) \n\t"\
	"psrlq $32, %%mm0 \n\t"\
	"movd %%mm0, 4(%0) \n\t"
#endif
/* uses the 7-Tap Filter: 1112111 */
#define NEW_HLP(src, dst)\
	"movq " #src "(%%eax), %%mm1 \n\t"\
	"movq " #src "(%%eax), %%mm2 \n\t"\
	"psllq $8, %%mm1 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"movd -4" #dst ", %%mm3 \n\t" /*0001000*/\
	"movd 8" #dst ", %%mm4 \n\t" /*0001000*/\
	"psrlq $24, %%mm3 \n\t"\
	"psllq $56, %%mm4 \n\t"\
	"por %%mm3, %%mm1 \n\t"\
	"por %%mm4, %%mm2 \n\t"\
	"movq %%mm1, %%mm5 \n\t"\
	PAVGB(%%mm2, %%mm1)\
	"movq " #src "(%%eax), %%mm0 \n\t"\
	PAVGB(%%mm1, %%mm0)\
	"psllq $8, %%mm5 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"por %%mm3, %%mm5 \n\t"\
	"por %%mm4, %%mm2 \n\t"\
	"movq %%mm5, %%mm1 \n\t"\
	PAVGB(%%mm2, %%mm5)\
	"psllq $8, %%mm1 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"por %%mm3, %%mm1 \n\t"\
	"por %%mm4, %%mm2 \n\t"\
	PAVGB(%%mm2, %%mm1)\
	PAVGB(%%mm1, %%mm5)\
	PAVGB(%%mm5, %%mm0)\
	"movd %%mm0, " #dst " \n\t"\
	"psrlq $32, %%mm0 \n\t"\
	"movd %%mm0, 4" #dst " \n\t"
/* uses the 9-Tap Filter: 112242211 */
#define NEW_HLP2(i)\
	"movq " #i "(%%eax), %%mm0 \n\t" /*0001000*/\
	"movq %%mm0, %%mm1 \n\t" /*0001000*/\
	"movq %%mm0, %%mm2 \n\t" /*0001000*/\
	"movd -4(%0), %%mm3 \n\t" /*0001000*/\
	"movd 8(%0), %%mm4 \n\t" /*0001000*/\
	"psllq $8, %%mm1 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"psrlq $24, %%mm3 \n\t"\
	"psllq $56, %%mm4 \n\t"\
	"por %%mm3, %%mm1 \n\t" /*0010000*/\
	"por %%mm4, %%mm2 \n\t" /*0000100*/\
	"movq %%mm1, %%mm5 \n\t" /*0010000*/\
	PAVGB(%%mm2, %%mm1) /*0010100*/\
	PAVGB(%%mm1, %%mm0) /*0012100*/\
	"psllq $8, %%mm5 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"por %%mm3, %%mm5 \n\t" /*0100000*/\
	"por %%mm4, %%mm2 \n\t" /*0000010*/\
	"movq %%mm5, %%mm1 \n\t" /*0100000*/\
	PAVGB(%%mm2, %%mm5) /*0100010*/\
	"psllq $8, %%mm1 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"por %%mm3, %%mm1 \n\t" /*1000000*/\
	"por %%mm4, %%mm2 \n\t" /*0000001*/\
	"movq %%mm1, %%mm6 \n\t" /*1000000*/\
	PAVGB(%%mm2, %%mm1) /*1000001*/\
	"psllq $8, %%mm6 \n\t"\
	"psrlq $8, %%mm2 \n\t"\
	"por %%mm3, %%mm6 \n\t"/*100000000*/\
	"por %%mm4, %%mm2 \n\t"/*000000001*/\
	PAVGB(%%mm2, %%mm6) /*100000001*/\
	PAVGB(%%mm6, %%mm1) /*110000011*/\
	PAVGB(%%mm1, %%mm5) /*112000211*/\
	PAVGB(%%mm5, %%mm0) /*112242211*/\
	"movd %%mm0, (%0) \n\t"\
	"psrlq $32, %%mm0 \n\t"\
	"movd %%mm0, 4(%0) \n\t"
#define HLP(src, dst) NEW_HLP(src, dst)
		HLP(0, (%0))
		HLP(8, (%%ecx))
		HLP(16, (%%ecx, %1))
		HLP(24, (%%ecx, %1, 2))
		HLP(32, (%0, %1, 4))
		HLP(40, (%%ebx))
		HLP(48, (%%ebx, %1))
		HLP(56, (%%ebx, %1, 2))
		:
		: "r" (dst), "r" (stride)
		: "%eax", "%ebx", "%ecx"
	);
#else
	int y;
	for(y=0; y<BLOCK_SIZE; y++)
	{
		// The edge samples dst[-1] / dst[8] are only trusted when they are
		// within QP of the block border pixel; otherwise the border pixel
		// itself is replicated so real edges are not smeared into the block.
		const int first= ABS(dst[-1] - dst[0]) < QP ? dst[-1] : dst[0];
		const int last= ABS(dst[8] - dst[7]) < QP ? dst[8] : dst[7];
		// sums[i] = sum of the two pixels centered between positions i-1 and i;
		// each output is the 9-tap (1,1,2,2,4,2,2,1,1)/16 filter built from them
		int sums[9];
		sums[0] = first + dst[0];
		sums[1] = dst[0] + dst[1];
		sums[2] = dst[1] + dst[2];
		sums[3] = dst[2] + dst[3];
		sums[4] = dst[3] + dst[4];
		sums[5] = dst[4] + dst[5];
		sums[6] = dst[5] + dst[6];
		sums[7] = dst[6] + dst[7];
		sums[8] = dst[7] + last;
		dst[0]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
		dst[1]= ((dst[1]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
		dst[2]= ((dst[2]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
		dst[3]= ((dst[3]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
		dst[4]= ((dst[4]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
		dst[5]= ((dst[5]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
		dst[6]= (((last + dst[6])<<2) + ((dst[7] + sums[5])<<1) + sums[3] + 8)>>4;
		dst[7]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
		dst+= stride;
	}
#endif
}
  1662. static inline void dering(uint8_t src[], int stride, int QP)
  1663. {
  1664. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1665. asm volatile(
  1666. "movq pQPb, %%mm0 \n\t"
  1667. "paddusb %%mm0, %%mm0 \n\t"
  1668. "movq %%mm0, pQPb2 \n\t"
  1669. "leal (%0, %1), %%eax \n\t"
  1670. "leal (%%eax, %1, 4), %%ebx \n\t"
  1671. // 0 1 2 3 4 5 6 7 8 9
  1672. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1673. "pcmpeqb %%mm6, %%mm6 \n\t"
  1674. "pxor %%mm7, %%mm7 \n\t"
  1675. #ifdef HAVE_MMX2
  1676. #define FIND_MIN_MAX(addr)\
  1677. "movq " #addr ", %%mm0 \n\t"\
  1678. "pminub %%mm0, %%mm6 \n\t"\
  1679. "pmaxub %%mm0, %%mm7 \n\t"
  1680. #else
  1681. #define FIND_MIN_MAX(addr)\
  1682. "movq " #addr ", %%mm0 \n\t"\
  1683. "movq %%mm6, %%mm1 \n\t"\
  1684. "psubusb %%mm0, %%mm7 \n\t"\
  1685. "paddb %%mm0, %%mm7 \n\t"\
  1686. "psubusb %%mm0, %%mm1 \n\t"\
  1687. "psubb %%mm1, %%mm6 \n\t"
  1688. #endif
  1689. FIND_MIN_MAX((%%eax))
  1690. FIND_MIN_MAX((%%eax, %1))
  1691. FIND_MIN_MAX((%%eax, %1, 2))
  1692. FIND_MIN_MAX((%0, %1, 4))
  1693. FIND_MIN_MAX((%%ebx))
  1694. FIND_MIN_MAX((%%ebx, %1))
  1695. FIND_MIN_MAX((%%ebx, %1, 2))
  1696. FIND_MIN_MAX((%0, %1, 8))
  1697. "movq %%mm6, %%mm4 \n\t"
  1698. "psrlq $8, %%mm6 \n\t"
  1699. #ifdef HAVE_MMX2
  1700. "pminub %%mm4, %%mm6 \n\t" // min of pixels
  1701. "pshufw $0xF9, %%mm6, %%mm4 \n\t"
  1702. "pminub %%mm4, %%mm6 \n\t" // min of pixels
  1703. "pshufw $0xFE, %%mm6, %%mm4 \n\t"
  1704. "pminub %%mm4, %%mm6 \n\t"
  1705. #else
  1706. "movq %%mm6, %%mm1 \n\t"
  1707. "psubusb %%mm4, %%mm1 \n\t"
  1708. "psubb %%mm1, %%mm6 \n\t"
  1709. "movq %%mm6, %%mm4 \n\t"
  1710. "psrlq $16, %%mm6 \n\t"
  1711. "movq %%mm6, %%mm1 \n\t"
  1712. "psubusb %%mm4, %%mm1 \n\t"
  1713. "psubb %%mm1, %%mm6 \n\t"
  1714. "movq %%mm6, %%mm4 \n\t"
  1715. "psrlq $32, %%mm6 \n\t"
  1716. "movq %%mm6, %%mm1 \n\t"
  1717. "psubusb %%mm4, %%mm1 \n\t"
  1718. "psubb %%mm1, %%mm6 \n\t"
  1719. #endif
  1720. "movq %%mm7, %%mm4 \n\t"
  1721. "psrlq $8, %%mm7 \n\t"
  1722. #ifdef HAVE_MMX2
  1723. "pmaxub %%mm4, %%mm7 \n\t" // max of pixels
  1724. "pshufw $0xF9, %%mm7, %%mm4 \n\t"
  1725. "pmaxub %%mm4, %%mm7 \n\t"
  1726. "pshufw $0xFE, %%mm7, %%mm4 \n\t"
  1727. "pmaxub %%mm4, %%mm7 \n\t"
  1728. #else
  1729. "psubusb %%mm4, %%mm7 \n\t"
  1730. "paddb %%mm4, %%mm7 \n\t"
  1731. "movq %%mm7, %%mm4 \n\t"
  1732. "psrlq $16, %%mm7 \n\t"
  1733. "psubusb %%mm4, %%mm7 \n\t"
  1734. "paddb %%mm4, %%mm7 \n\t"
  1735. "movq %%mm7, %%mm4 \n\t"
  1736. "psrlq $32, %%mm7 \n\t"
  1737. "psubusb %%mm4, %%mm7 \n\t"
  1738. "paddb %%mm4, %%mm7 \n\t"
  1739. #endif
  1740. PAVGB(%%mm6, %%mm7) // a=(max + min)/2
  1741. "punpcklbw %%mm7, %%mm7 \n\t"
  1742. "punpcklbw %%mm7, %%mm7 \n\t"
  1743. "punpcklbw %%mm7, %%mm7 \n\t"
  1744. "movq %%mm7, temp0 \n\t"
  1745. "movq (%0), %%mm0 \n\t" // L10
  1746. "movq %%mm0, %%mm1 \n\t" // L10
  1747. "movq %%mm0, %%mm2 \n\t" // L10
  1748. "psllq $8, %%mm1 \n\t"
  1749. "psrlq $8, %%mm2 \n\t"
  1750. "movd -4(%0), %%mm3 \n\t"
  1751. "movd 8(%0), %%mm4 \n\t"
  1752. "psrlq $24, %%mm3 \n\t"
  1753. "psllq $56, %%mm4 \n\t"
  1754. "por %%mm3, %%mm1 \n\t" // L00
  1755. "por %%mm4, %%mm2 \n\t" // L20
  1756. "movq %%mm1, %%mm3 \n\t" // L00
  1757. PAVGB(%%mm2, %%mm1) // (L20 + L00)/2
  1758. PAVGB(%%mm0, %%mm1) // (L20 + L00 + 2L10)/4
  1759. "psubusb %%mm7, %%mm0 \n\t"
  1760. "psubusb %%mm7, %%mm2 \n\t"
  1761. "psubusb %%mm7, %%mm3 \n\t"
  1762. "pcmpeqb b00, %%mm0 \n\t" // L10 > a ? 0 : -1
  1763. "pcmpeqb b00, %%mm2 \n\t" // L20 > a ? 0 : -1
  1764. "pcmpeqb b00, %%mm3 \n\t" // L00 > a ? 0 : -1
  1765. "paddb %%mm2, %%mm0 \n\t"
  1766. "paddb %%mm3, %%mm0 \n\t"
  1767. "movq (%%eax), %%mm2 \n\t" // L11
  1768. "movq %%mm2, %%mm3 \n\t" // L11
  1769. "movq %%mm2, %%mm4 \n\t" // L11
  1770. "psllq $8, %%mm3 \n\t"
  1771. "psrlq $8, %%mm4 \n\t"
  1772. "movd -4(%%eax), %%mm5 \n\t"
  1773. "movd 8(%%eax), %%mm6 \n\t"
  1774. "psrlq $24, %%mm5 \n\t"
  1775. "psllq $56, %%mm6 \n\t"
  1776. "por %%mm5, %%mm3 \n\t" // L01
  1777. "por %%mm6, %%mm4 \n\t" // L21
  1778. "movq %%mm3, %%mm5 \n\t" // L01
  1779. PAVGB(%%mm4, %%mm3) // (L21 + L01)/2
  1780. PAVGB(%%mm2, %%mm3) // (L21 + L01 + 2L11)/4
  1781. "psubusb %%mm7, %%mm2 \n\t"
  1782. "psubusb %%mm7, %%mm4 \n\t"
  1783. "psubusb %%mm7, %%mm5 \n\t"
  1784. "pcmpeqb b00, %%mm2 \n\t" // L11 > a ? 0 : -1
  1785. "pcmpeqb b00, %%mm4 \n\t" // L21 > a ? 0 : -1
  1786. "pcmpeqb b00, %%mm5 \n\t" // L01 > a ? 0 : -1
  1787. "paddb %%mm4, %%mm2 \n\t"
  1788. "paddb %%mm5, %%mm2 \n\t"
  1789. // 0, 2, 3, 1
  1790. #define DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
  1791. "movq " #src ", " #sx " \n\t" /* src[0] */\
  1792. "movq " #sx ", " #lx " \n\t" /* src[0] */\
  1793. "movq " #sx ", " #t0 " \n\t" /* src[0] */\
  1794. "psllq $8, " #lx " \n\t"\
  1795. "psrlq $8, " #t0 " \n\t"\
  1796. "movd -4" #src ", " #t1 " \n\t"\
  1797. "psrlq $24, " #t1 " \n\t"\
  1798. "por " #t1 ", " #lx " \n\t" /* src[-1] */\
  1799. "movd 8" #src ", " #t1 " \n\t"\
  1800. "psllq $56, " #t1 " \n\t"\
  1801. "por " #t1 ", " #t0 " \n\t" /* src[+1] */\
  1802. "movq " #lx ", " #t1 " \n\t" /* src[-1] */\
  1803. PAVGB(t0, lx) /* (src[-1] + src[+1])/2 */\
  1804. PAVGB(sx, lx) /* (src[-1] + 2src[0] + src[+1])/4 */\
  1805. PAVGB(lx, pplx) \
  1806. "movq " #lx ", temp1 \n\t"\
  1807. "movq temp0, " #lx " \n\t"\
  1808. "psubusb " #lx ", " #t1 " \n\t"\
  1809. "psubusb " #lx ", " #t0 " \n\t"\
  1810. "psubusb " #lx ", " #sx " \n\t"\
  1811. "movq b00, " #lx " \n\t"\
  1812. "pcmpeqb " #lx ", " #t1 " \n\t" /* src[-1] > a ? 0 : -1*/\
  1813. "pcmpeqb " #lx ", " #t0 " \n\t" /* src[+1] > a ? 0 : -1*/\
  1814. "pcmpeqb " #lx ", " #sx " \n\t" /* src[0] > a ? 0 : -1*/\
  1815. "paddb " #t1 ", " #t0 " \n\t"\
  1816. "paddb " #t0 ", " #sx " \n\t"\
  1817. \
  1818. PAVGB(plx, pplx) /* filtered */\
  1819. "movq " #dst ", " #t0 " \n\t" /* dst */\
  1820. "movq " #t0 ", " #t1 " \n\t" /* dst */\
  1821. "psubusb pQPb2, " #t0 " \n\t"\
  1822. "paddusb pQPb2, " #t1 " \n\t"\
  1823. PMAXUB(t0, pplx)\
  1824. PMINUB(t1, pplx, t0)\
  1825. "paddb " #sx ", " #ppsx " \n\t"\
  1826. "paddb " #psx ", " #ppsx " \n\t"\
  1827. "#paddb b02, " #ppsx " \n\t"\
  1828. "pand b08, " #ppsx " \n\t"\
  1829. "pcmpeqb " #lx ", " #ppsx " \n\t"\
  1830. "pand " #ppsx ", " #pplx " \n\t"\
  1831. "pandn " #dst ", " #ppsx " \n\t"\
  1832. "por " #pplx ", " #ppsx " \n\t"\
  1833. "movq " #ppsx ", " #dst " \n\t"\
  1834. "movq temp1, " #lx " \n\t"
  1835. /*
  1836. 0000000
  1837. 1111111
  1838. 1111110
  1839. 1111101
  1840. 1111100
  1841. 1111011
  1842. 1111010
  1843. 1111001
  1844. 1111000
  1845. 1110111
  1846. */
  1847. //DERING_CORE(dst,src ,ppsx ,psx ,sx ,pplx ,plx ,lx ,t0 ,t1)
  1848. DERING_CORE((%%eax),(%%eax, %1) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
  1849. DERING_CORE((%%eax, %1),(%%eax, %1, 2) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
  1850. DERING_CORE((%%eax, %1, 2),(%0, %1, 4) ,%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
  1851. DERING_CORE((%0, %1, 4),(%%ebx) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
  1852. DERING_CORE((%%ebx),(%%ebx, %1) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
  1853. DERING_CORE((%%ebx, %1), (%%ebx, %1, 2),%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
  1854. DERING_CORE((%%ebx, %1, 2),(%0, %1, 8) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
  1855. DERING_CORE((%0, %1, 8),(%%ebx, %1, 4) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
  1856. : : "r" (src), "r" (stride), "r" (QP)
  1857. : "%eax", "%ebx"
  1858. );
  1859. #else
  1860. int y;
  1861. int min=255;
  1862. int max=0;
  1863. int avg;
  1864. uint8_t *p;
  1865. int s[10];
  1866. for(y=1; y<9; y++)
  1867. {
  1868. int x;
  1869. p= src + stride*y;
  1870. for(x=1; x<9; x++)
  1871. {
  1872. p++;
  1873. if(*p > max) max= *p;
  1874. if(*p < min) min= *p;
  1875. }
  1876. }
  1877. avg= (min + max + 1)/2;
  1878. for(y=0; y<10; y++)
  1879. {
  1880. int x;
  1881. int t = 0;
  1882. p= src + stride*y;
  1883. for(x=0; x<10; x++)
  1884. {
  1885. if(*p > avg) t |= (1<<x);
  1886. p++;
  1887. }
  1888. t |= (~t)<<16;
  1889. t &= (t<<1) & (t>>1);
  1890. s[y] = t;
  1891. }
  1892. for(y=1; y<9; y++)
  1893. {
  1894. int x;
  1895. int t = s[y-1] & s[y] & s[y+1];
  1896. t|= t>>16;
  1897. p= src + stride*y;
  1898. for(x=1; x<9; x++)
  1899. {
  1900. p++;
  1901. if(t & (1<<x))
  1902. {
  1903. int f= (*(p-stride-1)) + 2*(*(p-stride)) + (*(p-stride+1))
  1904. +2*(*(p -1)) + 4*(*p ) + 2*(*(p +1))
  1905. +(*(p+stride-1)) + 2*(*(p+stride)) + (*(p+stride+1));
  1906. f= (f + 8)>>4;
  1907. if (*p + 2*QP < f) *p= *p + 2*QP;
  1908. else if(*p - 2*QP > f) *p= *p - 2*QP;
  1909. else *p=f;
  1910. }
  1911. }
  1912. }
  1913. #endif
  1914. }
  1915. /**
  1916. * Deinterlaces the given block
  1917. * will be called for every 8x8 block, and can read & write into an 8x16 block
  1918. */
/**
 * Deinterlaces the given block by linear interpolation:
 * each odd line (1,3,5,7) is replaced by the rounded average of the
 * even lines directly above and below it.
 * Will be called for every 8x8 block, and can read & write into an 8x16 block.
 */
static inline void deInterlaceInterpolateLinear(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
    asm volatile(
        "leal (%0, %1), %%eax \n\t"            // eax = src + stride
        "leal (%%eax, %1, 4), %%ebx \n\t"      // ebx = src + 5*stride
//    0    1    2    3    4    5    6    7    8    9
//    %0   eax  eax+%1 eax+2%1 %0+4%1 ebx  ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
        // line1 = avg(line0, line2)
        "movq (%0), %%mm0 \n\t"
        "movq (%%eax, %1), %%mm1 \n\t"
        PAVGB(%%mm1, %%mm0)
        "movq %%mm0, (%%eax) \n\t"
        // line3 = avg(line2, line4) -- mm1 still holds line2
        "movq (%0, %1, 4), %%mm0 \n\t"
        PAVGB(%%mm0, %%mm1)
        "movq %%mm1, (%%eax, %1, 2) \n\t"
        // line5 = avg(line4, line6) -- mm0 still holds line4
        "movq (%%ebx, %1), %%mm1 \n\t"
        PAVGB(%%mm1, %%mm0)
        "movq %%mm0, (%%ebx) \n\t"
        // line7 = avg(line6, line8) -- mm1 still holds line6
        "movq (%0, %1, 8), %%mm0 \n\t"
        PAVGB(%%mm0, %%mm1)
        "movq %%mm1, (%%ebx, %1, 2) \n\t"
        : : "r" (src), "r" (stride)
        : "%eax", "%ebx"
    );
#else
    // C fallback: truncating average (MMX PAVGB rounds up, so results may
    // differ by 1 LSB from the asm path)
    int x;
    for(x=0; x<8; x++)
    {
        src[stride] = (src[0] + src[stride*2])>>1;
        src[stride*3] = (src[stride*2] + src[stride*4])>>1;
        src[stride*5] = (src[stride*4] + src[stride*6])>>1;
        src[stride*7] = (src[stride*6] + src[stride*8])>>1;
        src++;
    }
#endif
}
  1955. /**
  1956. * Deinterlaces the given block
  1957. * will be called for every 8x8 block, and can read & write into an 8x16 block
 * no clipping in C version
  1959. */
/**
 * Deinterlaces the given block with a 4-tap cubic interpolator:
 * each interpolated line c is computed from the two lines above (a,b)
 * and the two below (d,e) as c = (9*(b+d) - (a+e)) / 16.
 * Will be called for every 8x8 block, and can read & write into an 8x16 block.
 * No clipping in the C version (the MMX path saturates via packuswb).
 */
static inline void deInterlaceInterpolateCubic(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
    asm volatile(
        "leal (%0, %1), %%eax \n\t"            // eax = src +   stride
        "leal (%%eax, %1, 4), %%ebx \n\t"      // ebx = src + 5*stride
        "leal (%%ebx, %1, 4), %%ecx \n\t"
        "addl %1, %%ecx \n\t"                  // ecx = src + 10*stride
        "pxor %%mm7, %%mm7 \n\t"               // mm7 = 0 (for byte->word unpack)
//    0    1    2    3    4    5    6    7    8    9    10
//    %0   eax  eax+%1 eax+2%1 %0+4%1 ebx  ebx+%1 ebx+2%1 %0+8%1 ebx+4%1 ecx
// Computes c = (9b + 9d - a - e)/16 with unsigned-saturating pack at the end.
#define DEINT_CUBIC(a,b,c,d,e)\
        "movq " #a ", %%mm0 \n\t"\
        "movq " #b ", %%mm1 \n\t"\
        "movq " #d ", %%mm2 \n\t"\
        "movq " #e ", %%mm3 \n\t"\
        PAVGB(%%mm2, %%mm1) /* (b+d) /2 */\
        PAVGB(%%mm3, %%mm0) /* (a+e) /2 */\
        "movq %%mm0, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm2 \n\t"\
        "movq %%mm1, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "psubw %%mm1, %%mm0 \n\t" /* L(a+e - (b+d))/2 */\
        "psubw %%mm3, %%mm2 \n\t" /* H(a+e - (b+d))/2 */\
        "psraw $3, %%mm0 \n\t" /* L(a+e - (b+d))/16 */\
        "psraw $3, %%mm2 \n\t" /* H(a+e - (b+d))/16 */\
        "psubw %%mm0, %%mm1 \n\t" /* L(9b + 9d - a - e)/16 */\
        "psubw %%mm2, %%mm3 \n\t" /* H(9b + 9d - a - e)/16 */\
        "packuswb %%mm3, %%mm1 \n\t"\
        "movq %%mm1, " #c " \n\t"
        DEINT_CUBIC((%0), (%%eax, %1), (%%eax, %1, 2), (%0, %1, 4), (%%ebx, %1))
        DEINT_CUBIC((%%eax, %1), (%0, %1, 4), (%%ebx), (%%ebx, %1), (%0, %1, 8))
        DEINT_CUBIC((%0, %1, 4), (%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8), (%%ecx))
        DEINT_CUBIC((%%ebx, %1), (%0, %1, 8), (%%ebx, %1, 4), (%%ecx), (%%ecx, %1, 2))
        : : "r" (src), "r" (stride)
        // NOTE(review): "ecx" is written without the '%' the other clobbers
        // use; GCC accepts both spellings, but this is inconsistent — confirm
        // and normalize to "%ecx".
        : "%eax", "%ebx", "ecx"
    );
#else
    // C fallback: rows 0..12 are read, odd rows 3,5,7,9 are written; the
    // shift form gives floor() semantics rather than the asm rounding.
    int x;
    for(x=0; x<8; x++)
    {
        src[stride*3] = (-src[0] + 9*src[stride*2] + 9*src[stride*4] - src[stride*6])>>4;
        src[stride*5] = (-src[stride*2] + 9*src[stride*4] + 9*src[stride*6] - src[stride*8])>>4;
        src[stride*7] = (-src[stride*4] + 9*src[stride*6] + 9*src[stride*8] - src[stride*10])>>4;
        src[stride*9] = (-src[stride*6] + 9*src[stride*8] + 9*src[stride*10] - src[stride*12])>>4;
        src++;
    }
#endif
}
  2011. /**
  2012. * Deinterlaces the given block
  2013. * will be called for every 8x8 block, and can read & write into an 8x16 block
  2014. * will shift the image up by 1 line (FIXME if this is a problem)
  2015. */
/**
 * Deinterlaces the given block with a linear blend:
 * every line becomes (prev + 2*cur + next)/4, mixing the two fields.
 * Will be called for every 8x8 block, and can read & write into an 8x16 block.
 * Will shift the image up by 1 line (FIXME if this is a problem).
 */
static inline void deInterlaceBlendLinear(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
    asm volatile(
        "leal (%0, %1), %%eax \n\t"            // eax = src +   stride
        "leal (%%eax, %1, 4), %%ebx \n\t"      // ebx = src + 5*stride
//    0    1    2    3    4    5    6    7    8    9
//    %0   eax  eax+%1 eax+2%1 %0+4%1 ebx  ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
        // Each step keeps two neighbours live in registers and averages
        // twice: avg(avg(prev,next), cur) == (prev + 2*cur + next)/4.
        "movq (%0), %%mm0 \n\t" // L0
        "movq (%%eax, %1), %%mm1 \n\t" // L2
        PAVGB(%%mm1, %%mm0) // L0+L2
        "movq (%%eax), %%mm2 \n\t" // L1
        PAVGB(%%mm2, %%mm0)
        "movq %%mm0, (%0) \n\t"
        "movq (%%eax, %1, 2), %%mm0 \n\t" // L3
        PAVGB(%%mm0, %%mm2) // L1+L3
        PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
        "movq %%mm2, (%%eax) \n\t"
        "movq (%0, %1, 4), %%mm2 \n\t" // L4
        PAVGB(%%mm2, %%mm1) // L2+L4
        PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
        "movq %%mm1, (%%eax, %1) \n\t"
        "movq (%%ebx), %%mm1 \n\t" // L5
        PAVGB(%%mm1, %%mm0) // L3+L5
        PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
        "movq %%mm0, (%%eax, %1, 2) \n\t"
        "movq (%%ebx, %1), %%mm0 \n\t" // L6
        PAVGB(%%mm0, %%mm2) // L4+L6
        PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
        "movq %%mm2, (%0, %1, 4) \n\t"
        "movq (%%ebx, %1, 2), %%mm2 \n\t" // L7
        PAVGB(%%mm2, %%mm1) // L5+L7
        PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
        "movq %%mm1, (%%ebx) \n\t"
        "movq (%0, %1, 8), %%mm1 \n\t" // L8
        PAVGB(%%mm1, %%mm0) // L6+L8
        PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8
        "movq %%mm0, (%%ebx, %1) \n\t"
        "movq (%%ebx, %1, 4), %%mm0 \n\t" // L9
        PAVGB(%%mm0, %%mm2) // L7+L9
        PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9
        "movq %%mm2, (%%ebx, %1, 2) \n\t"
        : : "r" (src), "r" (stride)
        : "%eax", "%ebx"
    );
#else
    // C fallback: same (prev + 2*cur + next)/4 kernel, truncating instead of
    // rounding like PAVGB.
    int x;
    for(x=0; x<8; x++)
    {
        src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
        src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
        src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
        src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
        src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
        src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
        src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
        src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
        src++;
    }
#endif
}
  2077. /**
  2078. * Deinterlaces the given block
  2079. * will be called for every 8x8 block, except the last row, and can read & write into an 8x16 block
  2080. */
/**
 * Deinterlaces the given block with a vertical median filter:
 * each filtered line becomes the per-byte median of itself and its
 * upper/lower neighbours.
 * Will be called for every 8x8 block, except the last row, and can read
 * & write into an 8x16 block.
 */
static inline void deInterlaceMedian(uint8_t src[], int stride)
{
#ifdef HAVE_MMX
#ifdef HAVE_MMX2
    // MMX2 path: median(a,b,c) = min(max(a,b), max(min(a,b), c)) using
    // pmaxub/pminub; intermediate max/min values are reused between steps.
    asm volatile(
        "leal (%0, %1), %%eax \n\t"            // eax = src +   stride
        "leal (%%eax, %1, 4), %%ebx \n\t"      // ebx = src + 5*stride
//    0    1    2    3    4    5    6    7    8    9
//    %0   eax  eax+%1 eax+2%1 %0+4%1 ebx  ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
        "movq (%0), %%mm0 \n\t" //
        "movq (%%eax, %1), %%mm2 \n\t" //
        "movq (%%eax), %%mm1 \n\t" //
        "movq %%mm0, %%mm3 \n\t"
        "pmaxub %%mm1, %%mm0 \n\t" //
        "pminub %%mm3, %%mm1 \n\t" //
        "pmaxub %%mm2, %%mm1 \n\t" //
        "pminub %%mm1, %%mm0 \n\t"
        "movq %%mm0, (%%eax) \n\t"
        "movq (%0, %1, 4), %%mm0 \n\t" //
        "movq (%%eax, %1, 2), %%mm1 \n\t" //
        "movq %%mm2, %%mm3 \n\t"
        "pmaxub %%mm1, %%mm2 \n\t" //
        "pminub %%mm3, %%mm1 \n\t" //
        "pmaxub %%mm0, %%mm1 \n\t" //
        "pminub %%mm1, %%mm2 \n\t"
        "movq %%mm2, (%%eax, %1, 2) \n\t"
        "movq (%%ebx), %%mm2 \n\t" //
        "movq (%%ebx, %1), %%mm1 \n\t" //
        "movq %%mm2, %%mm3 \n\t"
        "pmaxub %%mm0, %%mm2 \n\t" //
        "pminub %%mm3, %%mm0 \n\t" //
        "pmaxub %%mm1, %%mm0 \n\t" //
        "pminub %%mm0, %%mm2 \n\t"
        "movq %%mm2, (%%ebx) \n\t"
        "movq (%%ebx, %1, 2), %%mm2 \n\t" //
        "movq (%0, %1, 8), %%mm0 \n\t" //
        "movq %%mm2, %%mm3 \n\t"
        "pmaxub %%mm0, %%mm2 \n\t" //
        "pminub %%mm3, %%mm0 \n\t" //
        "pmaxub %%mm1, %%mm0 \n\t" //
        "pminub %%mm0, %%mm2 \n\t"
        "movq %%mm2, (%%ebx, %1, 2) \n\t"
        : : "r" (src), "r" (stride)
        : "%eax", "%ebx"
    );
#else // MMX without MMX2
    // Plain-MMX path: builds comparison masks with psubusb/pcmpeqb (no
    // pmaxub/pminub available) and selects the median via and/or logic.
    asm volatile(
        "leal (%0, %1), %%eax \n\t"
        "leal (%%eax, %1, 4), %%ebx \n\t"
//    0    1    2    3    4    5    6    7    8    9
//    %0   eax  eax+%1 eax+2%1 %0+4%1 ebx  ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
        "pxor %%mm7, %%mm7 \n\t"
// Stores the per-byte median of lines a, b, c into b.
#define MEDIAN(a,b,c)\
        "movq " #a ", %%mm0 \n\t"\
        "movq " #b ", %%mm2 \n\t"\
        "movq " #c ", %%mm1 \n\t"\
        "movq %%mm0, %%mm3 \n\t"\
        "movq %%mm1, %%mm4 \n\t"\
        "movq %%mm2, %%mm5 \n\t"\
        "psubusb %%mm1, %%mm3 \n\t"\
        "psubusb %%mm2, %%mm4 \n\t"\
        "psubusb %%mm0, %%mm5 \n\t"\
        "pcmpeqb %%mm7, %%mm3 \n\t"\
        "pcmpeqb %%mm7, %%mm4 \n\t"\
        "pcmpeqb %%mm7, %%mm5 \n\t"\
        "movq %%mm3, %%mm6 \n\t"\
        "pxor %%mm4, %%mm3 \n\t"\
        "pxor %%mm5, %%mm4 \n\t"\
        "pxor %%mm6, %%mm5 \n\t"\
        "por %%mm3, %%mm1 \n\t"\
        "por %%mm4, %%mm2 \n\t"\
        "por %%mm5, %%mm0 \n\t"\
        "pand %%mm2, %%mm0 \n\t"\
        "pand %%mm1, %%mm0 \n\t"\
        "movq %%mm0, " #b " \n\t"
        MEDIAN((%0), (%%eax), (%%eax, %1))
        MEDIAN((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4))
        MEDIAN((%0, %1, 4), (%%ebx), (%%ebx, %1))
        MEDIAN((%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8))
        : : "r" (src), "r" (stride)
        : "%eax", "%ebx"
    );
#endif // MMX
#else
    //FIXME
    // NOTE(review): this C fallback is a linear blend, not a median — it does
    // not match the MMX paths (hence the FIXME above).
    int x;
    for(x=0; x<8; x++)
    {
        src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
        src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
        src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
        src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
        src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
        src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
        src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
        src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
        src++;
    }
#endif
}
  2181. #ifdef HAVE_MMX
  2182. /**
  2183. * transposes and shift the given 8x8 Block into dst1 and dst2
  2184. */
/**
 * Transposes and shifts the given 8x8 block into dst1 and dst2.
 * Works in two 8x4 halves via punpck byte/word interleaves; results are
 * stored as dwords at fixed byte offsets (dst1: 128..196 step 16/4,
 * dst2: 48..116), i.e. both destinations use a 16-byte line stride.
 */
static inline void transpose1(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride)
{
    asm(
        "leal (%0, %1), %%eax \n\t"            // eax = src +   srcStride
        "leal (%%eax, %1, 4), %%ebx \n\t"      // ebx = src + 5*srcStride
//    0    1    2    3    4    5    6    7    8    9
//    %0   eax  eax+%1 eax+2%1 %0+4%1 ebx  ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
        // ---- first 4 source rows -> left 4 columns ----
        "movq (%0), %%mm0 \n\t" // 12345678
        "movq (%%eax), %%mm1 \n\t" // abcdefgh
        "movq %%mm0, %%mm2 \n\t" // 12345678
        "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
        "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
        "movq (%%eax, %1), %%mm1 \n\t"
        "movq (%%eax, %1, 2), %%mm3 \n\t"
        "movq %%mm1, %%mm4 \n\t"
        "punpcklbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm3, %%mm4 \n\t"
        "movq %%mm0, %%mm3 \n\t"
        "punpcklwd %%mm1, %%mm0 \n\t"
        "punpckhwd %%mm1, %%mm3 \n\t"
        "movq %%mm2, %%mm1 \n\t"
        "punpcklwd %%mm4, %%mm2 \n\t"
        "punpckhwd %%mm4, %%mm1 \n\t"
        // scatter the transposed dwords (dst1 offset 128+, dst2 offset 48+)
        "movd %%mm0, 128(%2) \n\t"
        "psrlq $32, %%mm0 \n\t"
        "movd %%mm0, 144(%2) \n\t"
        "movd %%mm3, 160(%2) \n\t"
        "psrlq $32, %%mm3 \n\t"
        "movd %%mm3, 176(%2) \n\t"
        "movd %%mm3, 48(%3) \n\t"
        "movd %%mm2, 192(%2) \n\t"
        "movd %%mm2, 64(%3) \n\t"
        "psrlq $32, %%mm2 \n\t"
        "movd %%mm2, 80(%3) \n\t"
        "movd %%mm1, 96(%3) \n\t"
        "psrlq $32, %%mm1 \n\t"
        "movd %%mm1, 112(%3) \n\t"
        // ---- last 4 source rows -> right 4 columns (offsets +4) ----
        "movq (%0, %1, 4), %%mm0 \n\t" // 12345678
        "movq (%%ebx), %%mm1 \n\t" // abcdefgh
        "movq %%mm0, %%mm2 \n\t" // 12345678
        "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
        "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
        "movq (%%ebx, %1), %%mm1 \n\t"
        "movq (%%ebx, %1, 2), %%mm3 \n\t"
        "movq %%mm1, %%mm4 \n\t"
        "punpcklbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm3, %%mm4 \n\t"
        "movq %%mm0, %%mm3 \n\t"
        "punpcklwd %%mm1, %%mm0 \n\t"
        "punpckhwd %%mm1, %%mm3 \n\t"
        "movq %%mm2, %%mm1 \n\t"
        "punpcklwd %%mm4, %%mm2 \n\t"
        "punpckhwd %%mm4, %%mm1 \n\t"
        "movd %%mm0, 132(%2) \n\t"
        "psrlq $32, %%mm0 \n\t"
        "movd %%mm0, 148(%2) \n\t"
        "movd %%mm3, 164(%2) \n\t"
        "psrlq $32, %%mm3 \n\t"
        "movd %%mm3, 180(%2) \n\t"
        "movd %%mm3, 52(%3) \n\t"
        "movd %%mm2, 196(%2) \n\t"
        "movd %%mm2, 68(%3) \n\t"
        "psrlq $32, %%mm2 \n\t"
        "movd %%mm2, 84(%3) \n\t"
        "movd %%mm1, 100(%3) \n\t"
        "psrlq $32, %%mm1 \n\t"
        "movd %%mm1, 116(%3) \n\t"
        :: "r" (src), "r" (srcStride), "r" (dst1), "r" (dst2)
        : "%eax", "%ebx"
    );
}
  2256. /**
  2257. * transposes the given 8x8 block
  2258. */
/**
 * Transposes the given 8x8 block from a 16-byte-stride temporary buffer
 * (src, as written by transpose1) back into dst with dstStride.
 * Same punpck interleave scheme as transpose1, in two 4-column halves.
 */
static inline void transpose2(uint8_t *dst, int dstStride, uint8_t *src)
{
    asm(
        "leal (%0, %1), %%eax \n\t"            // eax = dst +   dstStride
        "leal (%%eax, %1, 4), %%ebx \n\t"      // ebx = dst + 5*dstStride
//    0    1    2    3    4    5    6    7    8    9
//    %0   eax  eax+%1 eax+2%1 %0+4%1 ebx  ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
        // ---- first 4 buffer rows (stride 16) -> dst columns 0..3 ----
        "movq (%2), %%mm0 \n\t" // 12345678
        "movq 16(%2), %%mm1 \n\t" // abcdefgh
        "movq %%mm0, %%mm2 \n\t" // 12345678
        "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
        "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
        "movq 32(%2), %%mm1 \n\t"
        "movq 48(%2), %%mm3 \n\t"
        "movq %%mm1, %%mm4 \n\t"
        "punpcklbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm3, %%mm4 \n\t"
        "movq %%mm0, %%mm3 \n\t"
        "punpcklwd %%mm1, %%mm0 \n\t"
        "punpckhwd %%mm1, %%mm3 \n\t"
        "movq %%mm2, %%mm1 \n\t"
        "punpcklwd %%mm4, %%mm2 \n\t"
        "punpckhwd %%mm4, %%mm1 \n\t"
        "movd %%mm0, (%0) \n\t"
        "psrlq $32, %%mm0 \n\t"
        "movd %%mm0, (%%eax) \n\t"
        "movd %%mm3, (%%eax, %1) \n\t"
        "psrlq $32, %%mm3 \n\t"
        "movd %%mm3, (%%eax, %1, 2) \n\t"
        "movd %%mm2, (%0, %1, 4) \n\t"
        "psrlq $32, %%mm2 \n\t"
        "movd %%mm2, (%%ebx) \n\t"
        "movd %%mm1, (%%ebx, %1) \n\t"
        "psrlq $32, %%mm1 \n\t"
        "movd %%mm1, (%%ebx, %1, 2) \n\t"
        // ---- last 4 buffer rows -> dst columns 4..7 (offset +4) ----
        "movq 64(%2), %%mm0 \n\t" // 12345678
        "movq 80(%2), %%mm1 \n\t" // abcdefgh
        "movq %%mm0, %%mm2 \n\t" // 12345678
        "punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
        "punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
        "movq 96(%2), %%mm1 \n\t"
        "movq 112(%2), %%mm3 \n\t"
        "movq %%mm1, %%mm4 \n\t"
        "punpcklbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm3, %%mm4 \n\t"
        "movq %%mm0, %%mm3 \n\t"
        "punpcklwd %%mm1, %%mm0 \n\t"
        "punpckhwd %%mm1, %%mm3 \n\t"
        "movq %%mm2, %%mm1 \n\t"
        "punpcklwd %%mm4, %%mm2 \n\t"
        "punpckhwd %%mm4, %%mm1 \n\t"
        "movd %%mm0, 4(%0) \n\t"
        "psrlq $32, %%mm0 \n\t"
        "movd %%mm0, 4(%%eax) \n\t"
        "movd %%mm3, 4(%%eax, %1) \n\t"
        "psrlq $32, %%mm3 \n\t"
        "movd %%mm3, 4(%%eax, %1, 2) \n\t"
        "movd %%mm2, 4(%0, %1, 4) \n\t"
        "psrlq $32, %%mm2 \n\t"
        "movd %%mm2, 4(%%ebx) \n\t"
        "movd %%mm1, 4(%%ebx, %1) \n\t"
        "psrlq $32, %%mm1 \n\t"
        "movd %%mm1, 4(%%ebx, %1, 2) \n\t"
        :: "r" (dst), "r" (dstStride), "r" (src)
        : "%eax", "%ebx"
    );
}
  2326. #endif
#ifdef HAVE_ODIVX_POSTPROCESS
#include "../opendivx/postprocess.h"
// When non-zero, postprocess()/postprocess2()/getPpModeForQuality() delegate
// to the old OpenDivX postprocessor instead of the implementation in this file.
int use_old_pp=0;
#endif

// Forward declaration of the per-plane worker used by postprocess() and
// postprocess2(); defined later in this file.
static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
    QP_STORE_T QPs[], int QPStride, int isColor, int mode);
  2333. /* -pp Command line Help
  2334. NOTE/FIXME: put this at an appropriate place (--help, html docs, man mplayer)?
  2335. -pp <filterName>[:<option>[:<option>...]][,[-]<filterName>[:<option>...]]...
  2336. long form example:
  2337. -pp vdeblock:autoq,hdeblock:autoq,linblenddeint -pp default,-vdeblock
  2338. short form example:
  2339. -pp vb:a,hb:a,lb -pp de,-vb
  2340. Filters Options
  2341. short long name short long option Description
* * a autoq cpu power dependent enabler
c chrom chrominance filtering enabled
y nochrom chrominance filtering disabled
  2345. hb hdeblock horizontal deblocking filter
  2346. vb vdeblock vertical deblocking filter
  2347. vr rkvdeblock
  2348. h1 x1hdeblock Experimental horizontal deblock filter 1
  2349. v1 x1vdeblock Experimental vertical deblock filter 1
  2350. dr dering not implemented yet
  2351. al autolevels automatic brightness / contrast fixer
  2352. f fullyrange stretch luminance range to (0..255)
  2353. lb linblenddeint linear blend deinterlacer
  2354. li linipoldeint linear interpolating deinterlacer
  2355. ci cubicipoldeint cubic interpolating deinterlacer
  2356. md mediandeint median deinterlacer
  2357. de default hdeblock:a,vdeblock:a,dering:a,autolevels
  2358. fa fast x1hdeblock:a,x1vdeblock:a,dering:a,autolevels
  2359. */
  2360. /**
* returns a PPMode struct which will have a non 0 error variable if an error occurred
  2362. * name is the string after "-pp" on the command line
  2363. * quality is a number from 0 to GET_PP_QUALITY_MAX
  2364. */
  2365. struct PPMode getPPModeByNameAndQuality(char *name, int quality)
  2366. {
  2367. char temp[GET_MODE_BUFFER_SIZE];
  2368. char *p= temp;
  2369. char *filterDelimiters= ",";
  2370. char *optionDelimiters= ":";
  2371. struct PPMode ppMode= {0,0,0,0,0,0};
  2372. char *filterToken;
  2373. strncpy(temp, name, GET_MODE_BUFFER_SIZE);
  2374. for(;;){
  2375. char *filterName;
  2376. int q= GET_PP_QUALITY_MAX;
  2377. int chrom=-1;
  2378. char *option;
  2379. char *options[OPTIONS_ARRAY_SIZE];
  2380. int i;
  2381. int filterNameOk=0;
  2382. int numOfUnknownOptions=0;
  2383. int enable=1; //does the user want us to enabled or disabled the filter
  2384. filterToken= strtok(p, filterDelimiters);
  2385. if(filterToken == NULL) break;
  2386. p+= strlen(filterToken) + 1;
  2387. filterName= strtok(filterToken, optionDelimiters);
  2388. printf("%s::%s\n", filterToken, filterName);
  2389. if(*filterName == '-')
  2390. {
  2391. enable=0;
  2392. filterName++;
  2393. }
  2394. for(;;){ //for all options
  2395. option= strtok(NULL, optionDelimiters);
  2396. if(option == NULL) break;
  2397. printf("%s\n", option);
  2398. if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
  2399. else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
  2400. else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
  2401. else
  2402. {
  2403. options[numOfUnknownOptions] = option;
  2404. numOfUnknownOptions++;
  2405. options[numOfUnknownOptions] = NULL;
  2406. }
  2407. if(numOfUnknownOptions >= OPTIONS_ARRAY_SIZE-1) break;
  2408. }
  2409. /* replace stuff from the replace Table */
  2410. for(i=0; replaceTable[2*i]!=NULL; i++)
  2411. {
  2412. if(!strcmp(replaceTable[2*i], filterName))
  2413. {
  2414. int newlen= strlen(replaceTable[2*i + 1]);
  2415. int plen;
  2416. int spaceLeft;
  2417. if(p==NULL) p= temp, *p=0; //last filter
  2418. else p--, *p=','; //not last filter
  2419. plen= strlen(p);
  2420. spaceLeft= (int)p - (int)temp + plen;
  2421. if(spaceLeft + newlen >= GET_MODE_BUFFER_SIZE)
  2422. {
  2423. ppMode.error++;
  2424. break;
  2425. }
  2426. memmove(p + newlen, p, plen+1);
  2427. memcpy(p, replaceTable[2*i + 1], newlen);
  2428. filterNameOk=1;
  2429. }
  2430. }
  2431. for(i=0; filters[i].shortName!=NULL; i++)
  2432. {
  2433. if( !strcmp(filters[i].longName, filterName)
  2434. || !strcmp(filters[i].shortName, filterName))
  2435. {
  2436. ppMode.lumMode &= ~filters[i].mask;
  2437. ppMode.chromMode &= ~filters[i].mask;
  2438. filterNameOk=1;
  2439. if(!enable) break; // user wants to disable it
  2440. if(q >= filters[i].minLumQuality)
  2441. ppMode.lumMode|= filters[i].mask;
  2442. if(chrom==1 || (chrom==-1 && filters[i].chromDefault))
  2443. if(q >= filters[i].minChromQuality)
  2444. ppMode.chromMode|= filters[i].mask;
  2445. if(filters[i].mask == LEVEL_FIX)
  2446. {
  2447. int o;
  2448. ppMode.minAllowedY= 16;
  2449. ppMode.maxAllowedY= 234;
  2450. for(o=0; options[o]!=NULL; o++)
  2451. if( !strcmp(options[o],"fullyrange")
  2452. ||!strcmp(options[o],"f"))
  2453. {
  2454. ppMode.minAllowedY= 0;
  2455. ppMode.maxAllowedY= 255;
  2456. numOfUnknownOptions--;
  2457. }
  2458. }
  2459. }
  2460. }
  2461. if(!filterNameOk) ppMode.error++;
  2462. ppMode.error += numOfUnknownOptions;
  2463. }
  2464. if(ppMode.lumMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_H;
  2465. if(ppMode.lumMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_V;
  2466. if(ppMode.chromMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_H;
  2467. if(ppMode.chromMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_V;
  2468. if(ppMode.lumMode & DERING) ppMode.oldMode |= PP_DERING_Y;
  2469. if(ppMode.chromMode & DERING) ppMode.oldMode |= PP_DERING_C;
  2470. return ppMode;
  2471. }
  2472. /**
  2473. * ...
  2474. */
/**
 * Old-style entry point: postprocesses a YUV 4:2:0 frame.
 * Runs the luma plane at full resolution, then both chroma planes at
 * half resolution with the chroma mode bits extracted from `mode`.
 */
void postprocess(unsigned char * src[], int src_stride,
    unsigned char * dst[], int dst_stride,
    int horizontal_size, int vertical_size,
    QP_STORE_T *QP_store, int QP_stride,
    int mode)
{
/*
    static int qual=0;
    struct PPMode ppMode= getPPModeByNameAndQuality("fast,default,-hdeblock,-vdeblock", qual);
    qual++;
    qual%=7;
    printf("\n%d %d %d %d\n", ppMode.lumMode, ppMode.chromMode, ppMode.oldMode, ppMode.error);
    postprocess2(src, src_stride, dst, dst_stride,
        horizontal_size, vertical_size, QP_store, QP_stride, &ppMode);
    return;
*/
#ifdef HAVE_ODIVX_POSTPROCESS
// Note: I could make this shit outside of this file, but it would mean one
// more function call...
    if(use_old_pp){
        odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,mode);
        return;
    }
#endif
    // luma plane, full resolution
    postProcess(src[0], src_stride, dst[0], dst_stride,
        horizontal_size, vertical_size, QP_store, QP_stride, 0, mode);

    // chroma planes: half resolution, and the chroma filter bits live in
    // the high nibble of the low byte of `mode`
    horizontal_size >>= 1;
    vertical_size >>= 1;
    src_stride >>= 1;
    dst_stride >>= 1;
    mode= ((mode&0xFF)>>4) | (mode&0xFFFFFF00);

    if(1)
    {
        postProcess(src[1], src_stride, dst[1], dst_stride,
            horizontal_size, vertical_size, QP_store, QP_stride, 1, mode);
        postProcess(src[2], src_stride, dst[2], dst_stride,
            horizontal_size, vertical_size, QP_store, QP_stride, 2, mode);
    }
    else
    {
        // NOTE(review): dead branch (guard is `if(1)`); also the size looks
        // like it should be src_stride*vertical_size, not horizontal_size —
        // confirm before ever enabling this path.
        memcpy(dst[1], src[1], src_stride*horizontal_size);
        memcpy(dst[2], src[2], src_stride*horizontal_size);
    }
}
  2519. void postprocess2(unsigned char * src[], int src_stride,
  2520. unsigned char * dst[], int dst_stride,
  2521. int horizontal_size, int vertical_size,
  2522. QP_STORE_T *QP_store, int QP_stride,
  2523. struct PPMode *mode)
  2524. {
  2525. #ifdef HAVE_ODIVX_POSTPROCESS
  2526. // Note: I could make this shit outside of this file, but it would mean one
  2527. // more function call...
  2528. if(use_old_pp){
  2529. odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,
  2530. mode->oldMode);
  2531. return;
  2532. }
  2533. #endif
  2534. postProcess(src[0], src_stride, dst[0], dst_stride,
  2535. horizontal_size, vertical_size, QP_store, QP_stride, 0, mode->lumMode);
  2536. horizontal_size >>= 1;
  2537. vertical_size >>= 1;
  2538. src_stride >>= 1;
  2539. dst_stride >>= 1;
  2540. postProcess(src[1], src_stride, dst[1], dst_stride,
  2541. horizontal_size, vertical_size, QP_store, QP_stride, 1, mode->chromMode);
  2542. postProcess(src[2], src_stride, dst[2], dst_stride,
  2543. horizontal_size, vertical_size, QP_store, QP_stride, 2, mode->chromMode);
  2544. }
  2545. /**
  2546. * gets the mode flags for a given quality (larger values mean slower but better postprocessing)
  2547. * 0 <= quality <= 6
  2548. */
  2549. int getPpModeForQuality(int quality){
  2550. int modes[1+GET_PP_QUALITY_MAX]= {
  2551. 0,
  2552. #if 1
  2553. // horizontal filters first
  2554. LUM_H_DEBLOCK,
  2555. LUM_H_DEBLOCK | LUM_V_DEBLOCK,
  2556. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2557. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2558. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING,
  2559. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING | CHROM_DERING
  2560. #else
  2561. // vertical filters first
  2562. LUM_V_DEBLOCK,
  2563. LUM_V_DEBLOCK | LUM_H_DEBLOCK,
  2564. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2565. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2566. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING,
  2567. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING | CHROM_DERING
  2568. #endif
  2569. };
  2570. #ifdef HAVE_ODIVX_POSTPROCESS
  2571. int odivx_modes[1+GET_PP_QUALITY_MAX]= {
  2572. 0,
  2573. PP_DEBLOCK_Y_H,
  2574. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V,
  2575. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H,
  2576. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V,
  2577. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y,
  2578. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y|PP_DERING_C
  2579. };
  2580. if(use_old_pp) return odivx_modes[quality];
  2581. #endif
  2582. return modes[quality];
  2583. }
  2584. /**
  2585. * Copies a block from src to dst and fixes the blacklevel
  2586. * numLines must be a multiple of 4
  2587. * levelFix == 0 -> dont touch the brighness & contrast
  2588. */
  2589. static inline void blockCopy(uint8_t dst[], int dstStride, uint8_t src[], int srcStride,
  2590. int numLines, int levelFix)
  2591. {
  2592. #ifndef HAVE_MMX
  2593. int i;
  2594. #endif
  2595. if(levelFix)
  2596. {
  2597. #ifdef HAVE_MMX
  2598. asm volatile(
  2599. "leal (%2,%2), %%eax \n\t"
  2600. "leal (%3,%3), %%ebx \n\t"
  2601. "movq packedYOffset, %%mm2 \n\t"
  2602. "movq packedYScale, %%mm3 \n\t"
  2603. "pxor %%mm4, %%mm4 \n\t"
  2604. #define SCALED_CPY \
  2605. "movq (%0), %%mm0 \n\t"\
  2606. "movq (%0), %%mm5 \n\t"\
  2607. "punpcklbw %%mm4, %%mm0 \n\t"\
  2608. "punpckhbw %%mm4, %%mm5 \n\t"\
  2609. "psubw %%mm2, %%mm0 \n\t"\
  2610. "psubw %%mm2, %%mm5 \n\t"\
  2611. "movq (%0,%2), %%mm1 \n\t"\
  2612. "psllw $6, %%mm0 \n\t"\
  2613. "psllw $6, %%mm5 \n\t"\
  2614. "pmulhw %%mm3, %%mm0 \n\t"\
  2615. "movq (%0,%2), %%mm6 \n\t"\
  2616. "pmulhw %%mm3, %%mm5 \n\t"\
  2617. "punpcklbw %%mm4, %%mm1 \n\t"\
  2618. "punpckhbw %%mm4, %%mm6 \n\t"\
  2619. "psubw %%mm2, %%mm1 \n\t"\
  2620. "psubw %%mm2, %%mm6 \n\t"\
  2621. "psllw $6, %%mm1 \n\t"\
  2622. "psllw $6, %%mm6 \n\t"\
  2623. "pmulhw %%mm3, %%mm1 \n\t"\
  2624. "pmulhw %%mm3, %%mm6 \n\t"\
  2625. "addl %%eax, %0 \n\t"\
  2626. "packuswb %%mm5, %%mm0 \n\t"\
  2627. "packuswb %%mm6, %%mm1 \n\t"\
  2628. "movq %%mm0, (%1) \n\t"\
  2629. "movq %%mm1, (%1, %3) \n\t"\
  2630. SCALED_CPY
  2631. "addl %%ebx, %1 \n\t"
  2632. SCALED_CPY
  2633. "addl %%ebx, %1 \n\t"
  2634. SCALED_CPY
  2635. "addl %%ebx, %1 \n\t"
  2636. SCALED_CPY
  2637. : "+r"(src),
  2638. "+r"(dst)
  2639. :"r" (srcStride),
  2640. "r" (dstStride)
  2641. : "%eax", "%ebx"
  2642. );
  2643. #else
  2644. for(i=0; i<numLines; i++)
  2645. memcpy( &(dst[dstStride*i]),
  2646. &(src[srcStride*i]), BLOCK_SIZE);
  2647. #endif
  2648. }
  2649. else
  2650. {
  2651. #ifdef HAVE_MMX
  2652. asm volatile(
  2653. "movl %4, %%eax \n\t"
  2654. "movl %%eax, temp0\n\t"
  2655. "pushl %0 \n\t"
  2656. "pushl %1 \n\t"
  2657. "leal (%2,%2), %%eax \n\t"
  2658. "leal (%3,%3), %%ebx \n\t"
  2659. "movq packedYOffset, %%mm2 \n\t"
  2660. "movq packedYScale, %%mm3 \n\t"
  2661. #define SIMPLE_CPY \
  2662. "movq (%0), %%mm0 \n\t"\
  2663. "movq (%0,%2), %%mm1 \n\t"\
  2664. "movq %%mm0, (%1) \n\t"\
  2665. "movq %%mm1, (%1, %3) \n\t"\
  2666. "1: \n\t"
  2667. SIMPLE_CPY
  2668. "addl %%eax, %0 \n\t"
  2669. "addl %%ebx, %1 \n\t"
  2670. SIMPLE_CPY
  2671. "addl %%eax, %0 \n\t"
  2672. "addl %%ebx, %1 \n\t"
  2673. "decl temp0 \n\t"
  2674. "jnz 1b \n\t"
  2675. "popl %1 \n\t"
  2676. "popl %0 \n\t"
  2677. : : "r" (src),
  2678. "r" (dst),
  2679. "r" (srcStride),
  2680. "r" (dstStride),
  2681. "m" (numLines>>2)
  2682. : "%eax", "%ebx"
  2683. );
  2684. #else
  2685. for(i=0; i<numLines; i++)
  2686. memcpy( &(dst[dstStride*i]),
  2687. &(src[srcStride*i]), BLOCK_SIZE);
  2688. #endif
  2689. }
  2690. }
  2691. /**
  2692. * Filters array of bytes (Y or U or V values)
  2693. */
  2694. static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
  2695. QP_STORE_T QPs[], int QPStride, int isColor, int mode)
  2696. {
  2697. int x,y;
  2698. /* we need 64bit here otherwise we´ll going to have a problem
  2699. after watching a black picture for 5 hours*/
  2700. static uint64_t *yHistogram= NULL;
  2701. int black=0, white=255; // blackest black and whitest white in the picture
  2702. int QPCorrecture= 256;
  2703. /* Temporary buffers for handling the last row(s) */
  2704. static uint8_t *tempDst= NULL;
  2705. static uint8_t *tempSrc= NULL;
  2706. /* Temporary buffers for handling the last block */
  2707. static uint8_t *tempDstBlock= NULL;
  2708. static uint8_t *tempSrcBlock= NULL;
  2709. #ifdef PP_FUNNY_STRIDE
  2710. uint8_t *dstBlockPtrBackup;
  2711. uint8_t *srcBlockPtrBackup;
  2712. #endif
  2713. #ifdef MORE_TIMING
  2714. long long T0, T1, diffTime=0;
  2715. #endif
  2716. #ifdef TIMING
  2717. long long memcpyTime=0, vertTime=0, horizTime=0, sumTime;
  2718. sumTime= rdtsc();
  2719. #endif
  2720. if(tempDst==NULL)
  2721. {
  2722. tempDst= (uint8_t*)memalign(8, 1024*24);
  2723. tempSrc= (uint8_t*)memalign(8, 1024*24);
  2724. tempDstBlock= (uint8_t*)memalign(8, 1024*24);
  2725. tempSrcBlock= (uint8_t*)memalign(8, 1024*24);
  2726. }
  2727. if(!yHistogram)
  2728. {
  2729. int i;
  2730. yHistogram= (uint64_t*)malloc(8*256);
  2731. for(i=0; i<256; i++) yHistogram[i]= width*height/64*15/256;
  2732. if(mode & FULL_Y_RANGE)
  2733. {
  2734. maxAllowedY=255;
  2735. minAllowedY=0;
  2736. }
  2737. }
  2738. if(!isColor)
  2739. {
  2740. uint64_t sum= 0;
  2741. int i;
  2742. static int framenum= -1;
  2743. uint64_t maxClipped;
  2744. uint64_t clipped;
  2745. double scale;
  2746. framenum++;
  2747. if(framenum == 1) yHistogram[0]= width*height/64*15/256;
  2748. for(i=0; i<256; i++)
  2749. {
  2750. sum+= yHistogram[i];
  2751. // printf("%d ", yHistogram[i]);
  2752. }
  2753. // printf("\n\n");
  2754. /* we allways get a completly black picture first */
  2755. maxClipped= (uint64_t)(sum * maxClippedThreshold);
  2756. clipped= sum;
  2757. for(black=255; black>0; black--)
  2758. {
  2759. if(clipped < maxClipped) break;
  2760. clipped-= yHistogram[black];
  2761. }
  2762. clipped= sum;
  2763. for(white=0; white<256; white++)
  2764. {
  2765. if(clipped < maxClipped) break;
  2766. clipped-= yHistogram[white];
  2767. }
  2768. packedYOffset= (black - minAllowedY) & 0xFFFF;
  2769. packedYOffset|= packedYOffset<<32;
  2770. packedYOffset|= packedYOffset<<16;
  2771. scale= (double)(maxAllowedY - minAllowedY) / (double)(white-black);
  2772. packedYScale= (uint16_t)(scale*1024.0 + 0.5);
  2773. packedYScale|= packedYScale<<32;
  2774. packedYScale|= packedYScale<<16;
  2775. }
  2776. else
  2777. {
  2778. packedYScale= 0x0100010001000100LL;
  2779. packedYOffset= 0;
  2780. }
  2781. if(mode & LEVEL_FIX) QPCorrecture= packedYScale &0xFFFF;
  2782. else QPCorrecture= 256;
  2783. /* copy first row of 8x8 blocks */
  2784. for(x=0; x<width; x+=BLOCK_SIZE)
  2785. blockCopy(dst + x, dstStride, src + x, srcStride, 8, mode & LEVEL_FIX);
  2786. for(y=0; y<height; y+=BLOCK_SIZE)
  2787. {
  2788. //1% speedup if these are here instead of the inner loop
  2789. uint8_t *srcBlock= &(src[y*srcStride]);
  2790. uint8_t *dstBlock= &(dst[y*dstStride]);
  2791. #ifdef ARCH_X86
  2792. int *QPptr= isColor ? &QPs[(y>>3)*QPStride] :&QPs[(y>>4)*QPStride];
  2793. int QPDelta= isColor ? 1<<(32-3) : 1<<(32-4);
  2794. int QPFrac= QPDelta;
  2795. uint8_t *tempBlock1= tempBlocks;
  2796. uint8_t *tempBlock2= tempBlocks + 8;
  2797. #endif
  2798. /* can we mess with a 8x16 block from srcBlock/dstBlock downwards and 1 line upwards
  2799. if not than use a temporary buffer */
  2800. if(y+15 >= height)
  2801. {
  2802. /* copy from line 5 to 12 of src, these will be copied with
  2803. blockcopy to dst later */
  2804. memcpy(tempSrc + srcStride*5, srcBlock + srcStride*5,
  2805. srcStride*MAX(height-y-5, 0) );
  2806. /* duplicate last line to fill the void upto line 12 */
  2807. if(y+12 >= height)
  2808. {
  2809. int i;
  2810. for(i=height-y; i<=12; i++)
  2811. memcpy(tempSrc + srcStride*i,
  2812. src + srcStride*(height-1), srcStride);
  2813. }
  2814. /* copy up to 6 lines of dst */
  2815. memcpy(tempDst, dstBlock - dstStride, dstStride*MIN(height-y+1, 6) );
  2816. dstBlock= tempDst + dstStride;
  2817. srcBlock= tempSrc;
  2818. }
  2819. // From this point on it is guranteed that we can read and write 16 lines downward
  2820. // finish 1 block before the next otherwise we´ll might have a problem
  2821. // with the L1 Cache of the P4 ... or only a few blocks at a time or soemthing
  2822. for(x=0; x<width; x+=BLOCK_SIZE)
  2823. {
  2824. const int stride= dstStride;
  2825. uint8_t *tmpXchg;
  2826. #ifdef ARCH_X86
  2827. int QP= *QPptr;
  2828. asm volatile(
  2829. "addl %2, %1 \n\t"
  2830. "sbbl %%eax, %%eax \n\t"
  2831. "shll $2, %%eax \n\t"
  2832. "subl %%eax, %0 \n\t"
  2833. : "+r" (QPptr), "+m" (QPFrac)
  2834. : "r" (QPDelta)
  2835. : "%eax"
  2836. );
  2837. #else
  2838. int QP= isColor ?
  2839. QPs[(y>>3)*QPStride + (x>>3)]:
  2840. QPs[(y>>4)*QPStride + (x>>4)];
  2841. #endif
  2842. if(!isColor)
  2843. {
  2844. QP= (QP* QPCorrecture)>>8;
  2845. yHistogram[ srcBlock[srcStride*4 + 4] ]++;
  2846. }
  2847. #ifdef HAVE_MMX
  2848. asm volatile(
  2849. "movd %0, %%mm7 \n\t"
  2850. "packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP
  2851. "packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP
  2852. "packuswb %%mm7, %%mm7 \n\t" // QP,..., QP
  2853. "movq %%mm7, pQPb \n\t"
  2854. : : "r" (QP)
  2855. );
  2856. #endif
  2857. #ifdef MORE_TIMING
  2858. T0= rdtsc();
  2859. #endif
  2860. #ifdef HAVE_MMX2
  2861. /*
  2862. prefetchnta(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
  2863. prefetchnta(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
  2864. prefetcht0(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
  2865. prefetcht0(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
  2866. */
  2867. /*
  2868. prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
  2869. prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
  2870. prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
  2871. prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
  2872. */
  2873. asm(
  2874. "movl %4, %%eax \n\t"
  2875. "shrl $2, %%eax \n\t"
  2876. "andl $6, %%eax \n\t"
  2877. "addl $5, %%eax \n\t"
  2878. "movl %%eax, %%ebx \n\t"
  2879. "imul %1, %%eax \n\t"
  2880. "imul %3, %%ebx \n\t"
  2881. "prefetchnta 32(%%eax, %0) \n\t"
  2882. "prefetcht0 32(%%ebx, %2) \n\t"
  2883. "addl %1, %%eax \n\t"
  2884. "addl %3, %%ebx \n\t"
  2885. "prefetchnta 32(%%eax, %0) \n\t"
  2886. "prefetcht0 32(%%ebx, %2) \n\t"
  2887. :: "r" (srcBlock), "r" (srcStride), "r" (dstBlock), "r" (dstStride),
  2888. "m" (x)
  2889. : "%eax", "%ebx"
  2890. );
  2891. #elif defined(HAVE_3DNOW)
  2892. //FIXME check if this is faster on an 3dnow chip or if its faster without the prefetch or ...
  2893. /* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
  2894. prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
  2895. prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
  2896. prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
  2897. */
  2898. #endif
  2899. #ifdef PP_FUNNY_STRIDE
  2900. //can we mess with a 8x16 block, if not use a temp buffer, yes again
  2901. if(x+7 >= width)
  2902. {
  2903. int i;
  2904. dstBlockPtrBackup= dstBlock;
  2905. srcBlockPtrBackup= srcBlock;
  2906. for(i=0;i<BLOCK_SIZE*2; i++)
  2907. {
  2908. memcpy(tempSrcBlock+i*srcStride, srcBlock+i*srcStride, width-x);
  2909. memcpy(tempDstBlock+i*dstStride, dstBlock+i*dstStride, width-x);
  2910. }
  2911. dstBlock= tempDstBlock;
  2912. srcBlock= tempSrcBlock;
  2913. }
  2914. #endif
  2915. blockCopy(dstBlock + dstStride*5, dstStride,
  2916. srcBlock + srcStride*5, srcStride, 8, mode & LEVEL_FIX);
  2917. if(mode & LINEAR_IPOL_DEINT_FILTER)
  2918. deInterlaceInterpolateLinear(dstBlock, dstStride);
  2919. else if(mode & LINEAR_BLEND_DEINT_FILTER)
  2920. deInterlaceBlendLinear(dstBlock, dstStride);
  2921. else if(mode & MEDIAN_DEINT_FILTER)
  2922. deInterlaceMedian(dstBlock, dstStride);
  2923. else if(mode & CUBIC_IPOL_DEINT_FILTER)
  2924. deInterlaceInterpolateCubic(dstBlock, dstStride);
  2925. /* else if(mode & CUBIC_BLEND_DEINT_FILTER)
  2926. deInterlaceBlendCubic(dstBlock, dstStride);
  2927. */
  2928. /* only deblock if we have 2 blocks */
  2929. if(y + 8 < height)
  2930. {
  2931. #ifdef MORE_TIMING
  2932. T1= rdtsc();
  2933. memcpyTime+= T1-T0;
  2934. T0=T1;
  2935. #endif
  2936. if(mode & V_RK1_FILTER)
  2937. vertRK1Filter(dstBlock, stride, QP);
  2938. else if(mode & V_X1_FILTER)
  2939. vertX1Filter(dstBlock, stride, QP);
  2940. else if(mode & V_DEBLOCK)
  2941. {
  2942. if( isVertDC(dstBlock, stride))
  2943. {
  2944. if(isVertMinMaxOk(dstBlock, stride, QP))
  2945. doVertLowPass(dstBlock, stride, QP);
  2946. }
  2947. else
  2948. doVertDefFilter(dstBlock, stride, QP);
  2949. }
  2950. #ifdef MORE_TIMING
  2951. T1= rdtsc();
  2952. vertTime+= T1-T0;
  2953. T0=T1;
  2954. #endif
  2955. }
  2956. #ifdef HAVE_MMX
  2957. transpose1(tempBlock1, tempBlock2, dstBlock, dstStride);
  2958. #endif
  2959. /* check if we have a previous block to deblock it with dstBlock */
  2960. if(x - 8 >= 0)
  2961. {
  2962. #ifdef MORE_TIMING
  2963. T0= rdtsc();
  2964. #endif
  2965. #ifdef HAVE_MMX
  2966. if(mode & H_RK1_FILTER)
  2967. vertRK1Filter(tempBlock1, 16, QP);
  2968. else if(mode & H_X1_FILTER)
  2969. vertX1Filter(tempBlock1, 16, QP);
  2970. else if(mode & H_DEBLOCK)
  2971. {
  2972. if( isVertDC(tempBlock1, 16))
  2973. {
  2974. if(isVertMinMaxOk(tempBlock1, 16, QP))
  2975. doVertLowPass(tempBlock1, 16, QP);
  2976. }
  2977. else
  2978. doVertDefFilter(tempBlock1, 16, QP);
  2979. }
  2980. transpose2(dstBlock-4, dstStride, tempBlock1 + 4*16);
  2981. #else
  2982. if(mode & H_X1_FILTER)
  2983. horizX1Filter(dstBlock-4, stride, QP);
  2984. else if(mode & H_DEBLOCK)
  2985. {
  2986. if( isHorizDC(dstBlock-4, stride))
  2987. {
  2988. if(isHorizMinMaxOk(dstBlock-4, stride, QP))
  2989. doHorizLowPass(dstBlock-4, stride, QP);
  2990. }
  2991. else
  2992. doHorizDefFilter(dstBlock-4, stride, QP);
  2993. }
  2994. #endif
  2995. #ifdef MORE_TIMING
  2996. T1= rdtsc();
  2997. horizTime+= T1-T0;
  2998. T0=T1;
  2999. #endif
  3000. if(mode & DERING)
  3001. {
  3002. //FIXME filter first line
  3003. if(y>0) dering(dstBlock - stride - 8, stride, QP);
  3004. }
  3005. }
  3006. else if(mode & DERING)
  3007. {
  3008. //FIXME y+15 is required cuz of the tempBuffer thing -> bottom right block isnt filtered
  3009. if(y > 8 && y+15 < height) dering(dstBlock - stride*9 + width - 8, stride, QP);
  3010. }
  3011. #ifdef PP_FUNNY_STRIDE
  3012. /* did we use a tmp-block buffer */
  3013. if(x+7 >= width)
  3014. {
  3015. int i;
  3016. dstBlock= dstBlockPtrBackup;
  3017. srcBlock= srcBlockPtrBackup;
  3018. for(i=0;i<BLOCK_SIZE*2; i++)
  3019. {
  3020. memcpy(dstBlock+i*dstStride, tempDstBlock+i*dstStride, width-x);
  3021. }
  3022. }
  3023. #endif
  3024. dstBlock+=8;
  3025. srcBlock+=8;
  3026. #ifdef HAVE_MMX
  3027. tmpXchg= tempBlock1;
  3028. tempBlock1= tempBlock2;
  3029. tempBlock2 = tmpXchg;
  3030. #endif
  3031. }
  3032. /* did we use a tmp buffer */
  3033. if(y+15 >= height)
  3034. {
  3035. uint8_t *dstBlock= &(dst[y*dstStride]);
  3036. memcpy(dstBlock, tempDst + dstStride, dstStride*(height-y) );
  3037. }
  3038. }
  3039. #ifdef HAVE_3DNOW
  3040. asm volatile("femms");
  3041. #elif defined (HAVE_MMX)
  3042. asm volatile("emms");
  3043. #endif
  3044. #ifdef TIMING
  3045. // FIXME diff is mostly the time spent for rdtsc (should subtract that but ...)
  3046. sumTime= rdtsc() - sumTime;
  3047. if(!isColor)
  3048. printf("cpy:%4dk, vert:%4dk, horiz:%4dk, sum:%4dk, diff:%4dk, color: %d/%d \r",
  3049. (int)(memcpyTime/1000), (int)(vertTime/1000), (int)(horizTime/1000),
  3050. (int)(sumTime/1000), (int)((sumTime-memcpyTime-vertTime-horizTime)/1000)
  3051. , black, white);
  3052. #endif
  3053. }