You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3486 lines
101KB

  1. /*
  2. Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. /*
  16. C MMX MMX2 3DNow
  17. isVertDC Ec Ec
  18. isVertMinMaxOk Ec Ec
  19. doVertLowPass E e e
  20. doVertDefFilter Ec Ec Ec
  21. isHorizDC Ec Ec
  22. isHorizMinMaxOk a E
  23. doHorizLowPass E e e
  24. doHorizDefFilter Ec Ec Ec
  25. deRing E e e*
  26. Vertical RKAlgo1 E a a
  27. Horizontal RKAlgo1 a a
  28. Vertical X1 a E E
  29. Horizontal X1 a E E
  30. LinIpolDeinterlace e E E*
  31. CubicIpolDeinterlace a e e*
  32. LinBlendDeinterlace e E E*
  33. MedianDeinterlace Ec Ec
* I don't have a 3DNow! CPU -> it's untested
E = Exact implementation
e = almost exact implementation (slightly different rounding,...)
a = alternative / approximate implementation
c = checked against the other implementations (-vo md5)
  39. */
  40. /*
  41. TODO:
verify that everything works as it should (how?)
  43. reduce the time wasted on the mem transfer
  44. implement dering
  45. implement everything in C at least (done at the moment but ...)
  46. unroll stuff if instructions depend too much on the prior one
  47. we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
  48. move YScale thing to the end instead of fixing QP
  49. write a faster and higher quality deblocking filter :)
  50. do something about the speed of the horizontal filters
  51. make the mainloop more flexible (variable number of blocks at once
  52. (the if/else stuff per block is slowing things down)
  53. compare the quality & speed of all filters
  54. split this huge file
  55. fix warnings (unused vars, ...)
  56. noise reduction filters
  57. border remover
  58. optimize c versions
  59. ...
  60. Notes:
  61. */
  62. //Changelog: use the CVS log
  63. #include <inttypes.h>
  64. #include <stdio.h>
  65. #include <stdlib.h>
  66. #include <string.h>
  67. #include "../config.h"
  68. #ifdef HAVE_MALLOC_H
  69. #include <malloc.h>
  70. #endif
  71. //#undef HAVE_MMX2
  72. //#define HAVE_3DNOW
  73. //#undef HAVE_MMX
  74. #include "postprocess.h"
  75. #define MIN(a,b) ((a) > (b) ? (b) : (a))
  76. #define MAX(a,b) ((a) < (b) ? (b) : (a))
  77. #define ABS(a) ((a) > 0 ? (a) : (-(a)))
  78. #define SIGN(a) ((a) > 0 ? 1 : -1)
  79. #ifdef HAVE_MMX2
  80. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  81. #elif defined (HAVE_3DNOW)
  82. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  83. #endif
  84. #ifdef HAVE_MMX2
  85. #define PMINUB(a,b,t) "pminub " #a ", " #b " \n\t"
  86. #elif defined (HAVE_MMX)
  87. #define PMINUB(b,a,t) \
  88. "movq " #a ", " #t " \n\t"\
  89. "psubusb " #b ", " #t " \n\t"\
  90. "psubb " #t ", " #a " \n\t"
  91. #endif
  92. #ifdef HAVE_MMX2
  93. #define PMAXUB(a,b) "pmaxub " #a ", " #b " \n\t"
  94. #elif defined (HAVE_MMX)
  95. #define PMAXUB(a,b) \
  96. "psubusb " #a ", " #b " \n\t"\
  97. "paddb " #a ", " #b " \n\t"
  98. #endif
  99. #define GET_MODE_BUFFER_SIZE 500
  100. #define OPTIONS_ARRAY_SIZE 10
  101. #ifdef HAVE_MMX
  102. static uint64_t __attribute__((aligned(8))) packedYOffset= 0x0000000000000000LL;
  103. static uint64_t __attribute__((aligned(8))) packedYScale= 0x0100010001000100LL;
  104. static uint64_t __attribute__((aligned(8))) w05= 0x0005000500050005LL;
  105. static uint64_t __attribute__((aligned(8))) w20= 0x0020002000200020LL;
  106. static uint64_t __attribute__((aligned(8))) w1400= 0x1400140014001400LL;
  107. static uint64_t __attribute__((aligned(8))) bm00000001= 0x00000000000000FFLL;
  108. static uint64_t __attribute__((aligned(8))) bm00010000= 0x000000FF00000000LL;
  109. static uint64_t __attribute__((aligned(8))) bm00001000= 0x00000000FF000000LL;
  110. static uint64_t __attribute__((aligned(8))) bm10000000= 0xFF00000000000000LL;
  111. static uint64_t __attribute__((aligned(8))) bm10000001= 0xFF000000000000FFLL;
  112. static uint64_t __attribute__((aligned(8))) bm11000011= 0xFFFF00000000FFFFLL;
  113. static uint64_t __attribute__((aligned(8))) bm00000011= 0x000000000000FFFFLL;
  114. static uint64_t __attribute__((aligned(8))) bm11111110= 0xFFFFFFFFFFFFFF00LL;
  115. static uint64_t __attribute__((aligned(8))) bm11000000= 0xFFFF000000000000LL;
  116. static uint64_t __attribute__((aligned(8))) bm00011000= 0x000000FFFF000000LL;
  117. static uint64_t __attribute__((aligned(8))) bm00110011= 0x0000FFFF0000FFFFLL;
  118. static uint64_t __attribute__((aligned(8))) bm11001100= 0xFFFF0000FFFF0000LL;
  119. static uint64_t __attribute__((aligned(8))) b00= 0x0000000000000000LL;
  120. static uint64_t __attribute__((aligned(8))) b01= 0x0101010101010101LL;
  121. static uint64_t __attribute__((aligned(8))) b02= 0x0202020202020202LL;
  122. static uint64_t __attribute__((aligned(8))) b0F= 0x0F0F0F0F0F0F0F0FLL;
  123. static uint64_t __attribute__((aligned(8))) b04= 0x0404040404040404LL;
  124. static uint64_t __attribute__((aligned(8))) b08= 0x0808080808080808LL;
  125. static uint64_t __attribute__((aligned(8))) bFF= 0xFFFFFFFFFFFFFFFFLL;
  126. static uint64_t __attribute__((aligned(8))) b20= 0x2020202020202020LL;
  127. static uint64_t __attribute__((aligned(8))) b80= 0x8080808080808080LL;
  128. static uint64_t __attribute__((aligned(8))) b7E= 0x7E7E7E7E7E7E7E7ELL;
  129. static uint64_t __attribute__((aligned(8))) b7C= 0x7C7C7C7C7C7C7C7CLL;
  130. static uint64_t __attribute__((aligned(8))) b3F= 0x3F3F3F3F3F3F3F3FLL;
  131. static uint64_t __attribute__((aligned(8))) temp0=0;
  132. static uint64_t __attribute__((aligned(8))) temp1=0;
  133. static uint64_t __attribute__((aligned(8))) temp2=0;
  134. static uint64_t __attribute__((aligned(8))) temp3=0;
  135. static uint64_t __attribute__((aligned(8))) temp4=0;
  136. static uint64_t __attribute__((aligned(8))) temp5=0;
  137. static uint64_t __attribute__((aligned(8))) pQPb=0;
  138. static uint64_t __attribute__((aligned(8))) pQPb2=0;
  139. static uint8_t __attribute__((aligned(8))) tempBlocks[8*16*2]; //used for the horizontal code
  140. #else
  141. static uint64_t packedYOffset= 0x0000000000000000LL;
  142. static uint64_t packedYScale= 0x0100010001000100LL;
  143. static uint8_t tempBlocks[8*16*2]; //used for the horizontal code
  144. #endif
  145. int hFlatnessThreshold= 56 - 16;
  146. int vFlatnessThreshold= 56 - 16;
  147. //amount of "black" u r willing to loose to get a brightness corrected picture
  148. double maxClippedThreshold= 0.01;
  149. int maxAllowedY=234;
  150. int minAllowedY=16;
  151. static struct PPFilter filters[]=
  152. {
  153. {"hb", "hdeblock", 1, 1, 3, H_DEBLOCK},
  154. {"vb", "vdeblock", 1, 2, 4, V_DEBLOCK},
  155. {"vr", "rkvdeblock", 1, 2, 4, H_RK1_FILTER},
  156. {"h1", "x1hdeblock", 1, 1, 3, H_X1_FILTER},
  157. {"v1", "x1vdeblock", 1, 2, 4, V_X1_FILTER},
  158. {"dr", "dering", 1, 5, 6, DERING},
  159. {"al", "autolevels", 0, 1, 2, LEVEL_FIX},
  160. {"lb", "linblenddeint", 0, 1, 6, LINEAR_BLEND_DEINT_FILTER},
  161. {"li", "linipoldeint", 0, 1, 6, LINEAR_IPOL_DEINT_FILTER},
  162. {"ci", "cubicipoldeint", 0, 1, 6, CUBIC_IPOL_DEINT_FILTER},
  163. {"md", "mediandeint", 0, 1, 6, MEDIAN_DEINT_FILTER},
  164. {NULL, NULL,0,0,0,0} //End Marker
  165. };
  166. static char *replaceTable[]=
  167. {
  168. "default", "hdeblock:a,vdeblock:a,dering:a,autolevels",
  169. "de", "hdeblock:a,vdeblock:a,dering:a,autolevels",
  170. "fast", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels",
  171. "fa", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels",
  172. NULL //End Marker
  173. };
  174. #ifdef HAVE_MMX
  175. static inline void unusedVariableWarningFixer()
  176. {
  177. if(
  178. packedYOffset + packedYScale + w05 + w20 + w1400 + bm00000001 + bm00010000
  179. + bm00001000 + bm10000000 + bm10000001 + bm11000011 + bm00000011 + bm11111110
  180. + bm11000000 + bm00011000 + bm00110011 + bm11001100 + b00 + b01 + b02 + b0F
  181. + bFF + b20 + b04+ b08 + pQPb2 + b80 + b7E + b7C + b3F + temp0 + temp1 + temp2 + temp3 + temp4
  182. + temp5 + pQPb== 0) b00=0;
  183. }
  184. #endif
  185. #ifdef TIMING
  186. static inline long long rdtsc()
  187. {
  188. long long l;
  189. asm volatile( "rdtsc\n\t"
  190. : "=A" (l)
  191. );
  192. // printf("%d\n", int(l/1000));
  193. return l;
  194. }
  195. #endif
  196. #ifdef HAVE_MMX2
  197. static inline void prefetchnta(void *p)
  198. {
  199. asm volatile( "prefetchnta (%0)\n\t"
  200. : : "r" (p)
  201. );
  202. }
  203. static inline void prefetcht0(void *p)
  204. {
  205. asm volatile( "prefetcht0 (%0)\n\t"
  206. : : "r" (p)
  207. );
  208. }
  209. static inline void prefetcht1(void *p)
  210. {
  211. asm volatile( "prefetcht1 (%0)\n\t"
  212. : : "r" (p)
  213. );
  214. }
  215. static inline void prefetcht2(void *p)
  216. {
  217. asm volatile( "prefetcht2 (%0)\n\t"
  218. : : "r" (p)
  219. );
  220. }
  221. #endif
  222. //FIXME? |255-0| = 1 (shouldnt be a problem ...)
  223. /**
  224. * Check if the middle 8x8 Block in the given 8x16 block is flat
  225. */
  226. static inline int isVertDC(uint8_t src[], int stride){
  227. int numEq= 0;
  228. #ifndef HAVE_MMX
  229. int y;
  230. #endif
  231. src+= stride*4; // src points to begin of the 8x8 Block
  232. #ifdef HAVE_MMX
  233. asm volatile(
  234. "leal (%1, %2), %%eax \n\t"
  235. "leal (%%eax, %2, 4), %%ebx \n\t"
  236. // 0 1 2 3 4 5 6 7 8 9
  237. // %1 eax eax+%2 eax+2%2 %1+4%2 ebx ebx+%2 ebx+2%2 %1+8%2 ebx+4%2
  238. "movq b7E, %%mm7 \n\t" // mm7 = 0x7F
  239. "movq b7C, %%mm6 \n\t" // mm6 = 0x7D
  240. "movq (%1), %%mm0 \n\t"
  241. "movq (%%eax), %%mm1 \n\t"
  242. "psubb %%mm1, %%mm0 \n\t" // mm0 = differnece
  243. "paddb %%mm7, %%mm0 \n\t"
  244. "pcmpgtb %%mm6, %%mm0 \n\t"
  245. "movq (%%eax,%2), %%mm2 \n\t"
  246. "psubb %%mm2, %%mm1 \n\t"
  247. "paddb %%mm7, %%mm1 \n\t"
  248. "pcmpgtb %%mm6, %%mm1 \n\t"
  249. "paddb %%mm1, %%mm0 \n\t"
  250. "movq (%%eax, %2, 2), %%mm1 \n\t"
  251. "psubb %%mm1, %%mm2 \n\t"
  252. "paddb %%mm7, %%mm2 \n\t"
  253. "pcmpgtb %%mm6, %%mm2 \n\t"
  254. "paddb %%mm2, %%mm0 \n\t"
  255. "movq (%1, %2, 4), %%mm2 \n\t"
  256. "psubb %%mm2, %%mm1 \n\t"
  257. "paddb %%mm7, %%mm1 \n\t"
  258. "pcmpgtb %%mm6, %%mm1 \n\t"
  259. "paddb %%mm1, %%mm0 \n\t"
  260. "movq (%%ebx), %%mm1 \n\t"
  261. "psubb %%mm1, %%mm2 \n\t"
  262. "paddb %%mm7, %%mm2 \n\t"
  263. "pcmpgtb %%mm6, %%mm2 \n\t"
  264. "paddb %%mm2, %%mm0 \n\t"
  265. "movq (%%ebx, %2), %%mm2 \n\t"
  266. "psubb %%mm2, %%mm1 \n\t"
  267. "paddb %%mm7, %%mm1 \n\t"
  268. "pcmpgtb %%mm6, %%mm1 \n\t"
  269. "paddb %%mm1, %%mm0 \n\t"
  270. "movq (%%ebx, %2, 2), %%mm1 \n\t"
  271. "psubb %%mm1, %%mm2 \n\t"
  272. "paddb %%mm7, %%mm2 \n\t"
  273. "pcmpgtb %%mm6, %%mm2 \n\t"
  274. "paddb %%mm2, %%mm0 \n\t"
  275. " \n\t"
  276. "movq %%mm0, %%mm1 \n\t"
  277. "psrlw $8, %%mm0 \n\t"
  278. "paddb %%mm1, %%mm0 \n\t"
  279. #ifdef HAVE_MMX2
  280. "pshufw $0xF9, %%mm0, %%mm1 \n\t"
  281. "paddb %%mm1, %%mm0 \n\t"
  282. "pshufw $0xFE, %%mm0, %%mm1 \n\t"
  283. #else
  284. "movq %%mm0, %%mm1 \n\t"
  285. "psrlq $16, %%mm0 \n\t"
  286. "paddb %%mm1, %%mm0 \n\t"
  287. "movq %%mm0, %%mm1 \n\t"
  288. "psrlq $32, %%mm0 \n\t"
  289. #endif
  290. "paddb %%mm1, %%mm0 \n\t"
  291. "movd %%mm0, %0 \n\t"
  292. : "=r" (numEq)
  293. : "r" (src), "r" (stride)
  294. : "%eax", "%ebx"
  295. );
  296. numEq= (256 - numEq) &0xFF;
  297. #else
  298. for(y=0; y<BLOCK_SIZE-1; y++)
  299. {
  300. if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
  301. if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
  302. if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
  303. if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
  304. if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
  305. if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
  306. if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
  307. if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
  308. src+= stride;
  309. }
  310. #endif
  311. /* if(abs(numEq - asmEq) > 0)
  312. {
  313. printf("\nasm:%d c:%d\n", asmEq, numEq);
  314. for(int y=0; y<8; y++)
  315. {
  316. for(int x=0; x<8; x++)
  317. {
  318. printf("%d ", temp[x + y*stride]);
  319. }
  320. printf("\n");
  321. }
  322. }
  323. */
  324. // for(int i=0; i<numEq/8; i++) src[i]=255;
  325. return (numEq > vFlatnessThreshold) ? 1 : 0;
  326. }
  327. static inline int isVertMinMaxOk(uint8_t src[], int stride, int QP)
  328. {
  329. #ifdef HAVE_MMX
  330. int isOk;
  331. src+= stride*3;
  332. asm volatile(
  333. // "int $3 \n\t"
  334. "movq (%1, %2), %%mm0 \n\t"
  335. "movq (%1, %2, 8), %%mm1 \n\t"
  336. "movq %%mm0, %%mm2 \n\t"
  337. "psubusb %%mm1, %%mm0 \n\t"
  338. "psubusb %%mm2, %%mm1 \n\t"
  339. "por %%mm1, %%mm0 \n\t" // ABS Diff
  340. "movq pQPb, %%mm7 \n\t" // QP,..., QP
  341. "paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
  342. "psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
  343. "pcmpeqd b00, %%mm0 \n\t"
  344. "psrlq $16, %%mm0 \n\t"
  345. "pcmpeqd bFF, %%mm0 \n\t"
  346. // "movd %%mm0, (%1, %2, 4)\n\t"
  347. "movd %%mm0, %0 \n\t"
  348. : "=r" (isOk)
  349. : "r" (src), "r" (stride)
  350. );
  351. return isOk;
  352. #else
  353. int isOk2= 1;
  354. int x;
  355. src+= stride*3;
  356. for(x=0; x<BLOCK_SIZE; x++)
  357. {
  358. if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=0;
  359. }
  360. /* if(isOk && !isOk2 || !isOk && isOk2)
  361. {
  362. printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
  363. for(int y=0; y<9; y++)
  364. {
  365. for(int x=0; x<8; x++)
  366. {
  367. printf("%d ", src[x + y*stride]);
  368. }
  369. printf("\n");
  370. }
  371. } */
  372. return isOk2;
  373. #endif
  374. }
  375. /**
  376. * Do a vertical low pass filter on the 8x16 block (only write to the 8x8 block in the middle)
  377. * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16
  378. */
  379. static inline void doVertLowPass(uint8_t *src, int stride, int QP)
  380. {
  381. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  382. src+= stride*3;
  383. asm volatile( //"movv %0 %1 %2\n\t"
  384. "movq pQPb, %%mm0 \n\t" // QP,..., QP
  385. "movq (%0), %%mm6 \n\t"
  386. "movq (%0, %1), %%mm5 \n\t"
  387. "movq %%mm5, %%mm1 \n\t"
  388. "movq %%mm6, %%mm2 \n\t"
  389. "psubusb %%mm6, %%mm5 \n\t"
  390. "psubusb %%mm1, %%mm2 \n\t"
  391. "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
  392. "psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
  393. "pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
  394. "pand %%mm2, %%mm6 \n\t"
  395. "pandn %%mm1, %%mm2 \n\t"
  396. "por %%mm2, %%mm6 \n\t"// First Line to Filter
  397. "movq (%0, %1, 8), %%mm5 \n\t"
  398. "leal (%0, %1, 4), %%eax \n\t"
  399. "leal (%0, %1, 8), %%ebx \n\t"
  400. "subl %1, %%ebx \n\t"
  401. "addl %1, %0 \n\t" // %0 points to line 1 not 0
  402. "movq (%0, %1, 8), %%mm7 \n\t"
  403. "movq %%mm5, %%mm1 \n\t"
  404. "movq %%mm7, %%mm2 \n\t"
  405. "psubusb %%mm7, %%mm5 \n\t"
  406. "psubusb %%mm1, %%mm2 \n\t"
  407. "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
  408. "psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
  409. "pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
  410. "pand %%mm2, %%mm7 \n\t"
  411. "pandn %%mm1, %%mm2 \n\t"
  412. "por %%mm2, %%mm7 \n\t" // First Line to Filter
  413. // 1 2 3 4 5 6 7 8
  414. // %0 %0+%1 %0+2%1 eax %0+4%1 eax+2%1 ebx eax+4%1
  415. // 6 4 2 2 1 1
  416. // 6 4 4 2
  417. // 6 8 2
  418. "movq (%0, %1), %%mm0 \n\t" // 1
  419. "movq %%mm0, %%mm1 \n\t" // 1
  420. PAVGB(%%mm6, %%mm0) //1 1 /2
  421. PAVGB(%%mm6, %%mm0) //3 1 /4
  422. "movq (%0, %1, 4), %%mm2 \n\t" // 1
  423. "movq %%mm2, %%mm5 \n\t" // 1
  424. PAVGB((%%eax), %%mm2) // 11 /2
  425. PAVGB((%0, %1, 2), %%mm2) // 211 /4
  426. "movq %%mm2, %%mm3 \n\t" // 211 /4
  427. "movq (%0), %%mm4 \n\t" // 1
  428. PAVGB(%%mm4, %%mm3) // 4 211 /8
  429. PAVGB(%%mm0, %%mm3) //642211 /16
  430. "movq %%mm3, (%0) \n\t" // X
  431. // mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
  432. "movq %%mm1, %%mm0 \n\t" // 1
  433. PAVGB(%%mm6, %%mm0) //1 1 /2
  434. "movq %%mm4, %%mm3 \n\t" // 1
  435. PAVGB((%0,%1,2), %%mm3) // 1 1 /2
  436. PAVGB((%%eax,%1,2), %%mm5) // 11 /2
  437. PAVGB((%%eax), %%mm5) // 211 /4
  438. PAVGB(%%mm5, %%mm3) // 2 2211 /8
  439. PAVGB(%%mm0, %%mm3) //4242211 /16
  440. "movq %%mm3, (%0,%1) \n\t" // X
  441. // mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
  442. PAVGB(%%mm4, %%mm6) //11 /2
  443. "movq (%%ebx), %%mm0 \n\t" // 1
  444. PAVGB((%%eax, %1, 2), %%mm0) // 11/2
  445. "movq %%mm0, %%mm3 \n\t" // 11/2
  446. PAVGB(%%mm1, %%mm0) // 2 11/4
  447. PAVGB(%%mm6, %%mm0) //222 11/8
  448. PAVGB(%%mm2, %%mm0) //22242211/16
  449. "movq (%0, %1, 2), %%mm2 \n\t" // 1
  450. "movq %%mm0, (%0, %1, 2) \n\t" // X
  451. // mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
  452. "movq (%%eax, %1, 4), %%mm0 \n\t" // 1
  453. PAVGB((%%ebx), %%mm0) // 11 /2
  454. PAVGB(%%mm0, %%mm6) //11 11 /4
  455. PAVGB(%%mm1, %%mm4) // 11 /2
  456. PAVGB(%%mm2, %%mm1) // 11 /2
  457. PAVGB(%%mm1, %%mm6) //1122 11 /8
  458. PAVGB(%%mm5, %%mm6) //112242211 /16
  459. "movq (%%eax), %%mm5 \n\t" // 1
  460. "movq %%mm6, (%%eax) \n\t" // X
  461. // mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
  462. "movq (%%eax, %1, 4), %%mm6 \n\t" // 1
  463. PAVGB(%%mm7, %%mm6) // 11 /2
  464. PAVGB(%%mm4, %%mm6) // 11 11 /4
  465. PAVGB(%%mm3, %%mm6) // 11 2211 /8
  466. PAVGB(%%mm5, %%mm2) // 11 /2
  467. "movq (%0, %1, 4), %%mm4 \n\t" // 1
  468. PAVGB(%%mm4, %%mm2) // 112 /4
  469. PAVGB(%%mm2, %%mm6) // 112242211 /16
  470. "movq %%mm6, (%0, %1, 4) \n\t" // X
  471. // mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
  472. PAVGB(%%mm7, %%mm1) // 11 2 /4
  473. PAVGB(%%mm4, %%mm5) // 11 /2
  474. PAVGB(%%mm5, %%mm0) // 11 11 /4
  475. "movq (%%eax, %1, 2), %%mm6 \n\t" // 1
  476. PAVGB(%%mm6, %%mm1) // 11 4 2 /8
  477. PAVGB(%%mm0, %%mm1) // 11224222 /16
  478. "movq %%mm1, (%%eax, %1, 2) \n\t" // X
  479. // mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
  480. PAVGB((%%ebx), %%mm2) // 112 4 /8
  481. "movq (%%eax, %1, 4), %%mm0 \n\t" // 1
  482. PAVGB(%%mm0, %%mm6) // 1 1 /2
  483. PAVGB(%%mm7, %%mm6) // 1 12 /4
  484. PAVGB(%%mm2, %%mm6) // 1122424 /4
  485. "movq %%mm6, (%%ebx) \n\t" // X
  486. // mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
  487. PAVGB(%%mm7, %%mm5) // 11 2 /4
  488. PAVGB(%%mm7, %%mm5) // 11 6 /8
  489. PAVGB(%%mm3, %%mm0) // 112 /4
  490. PAVGB(%%mm0, %%mm5) // 112246 /16
  491. "movq %%mm5, (%%eax, %1, 4) \n\t" // X
  492. "subl %1, %0 \n\t"
  493. :
  494. : "r" (src), "r" (stride)
  495. : "%eax", "%ebx"
  496. );
  497. #else
  498. const int l1= stride;
  499. const int l2= stride + l1;
  500. const int l3= stride + l2;
  501. const int l4= stride + l3;
  502. const int l5= stride + l4;
  503. const int l6= stride + l5;
  504. const int l7= stride + l6;
  505. const int l8= stride + l7;
  506. const int l9= stride + l8;
  507. int x;
  508. src+= stride*3;
  509. for(x=0; x<BLOCK_SIZE; x++)
  510. {
  511. const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
  512. const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];
  513. int sums[9];
  514. sums[0] = first + src[l1];
  515. sums[1] = src[l1] + src[l2];
  516. sums[2] = src[l2] + src[l3];
  517. sums[3] = src[l3] + src[l4];
  518. sums[4] = src[l4] + src[l5];
  519. sums[5] = src[l5] + src[l6];
  520. sums[6] = src[l6] + src[l7];
  521. sums[7] = src[l7] + src[l8];
  522. sums[8] = src[l8] + last;
  523. src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
  524. src[l2]= ((src[l2]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
  525. src[l3]= ((src[l3]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
  526. src[l4]= ((src[l4]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
  527. src[l5]= ((src[l5]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
  528. src[l6]= ((src[l6]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
  529. src[l7]= (((last + src[l7])<<2) + ((src[l8] + sums[5])<<1) + sums[3] + 8)>>4;
  530. src[l8]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
  531. src++;
  532. }
  533. #endif
  534. }
  535. /**
  536. * Experimental implementation of the filter (Algorithm 1) described in a paper from Ramkishor & Karandikar
  537. * values are correctly clipped (MMX2)
  538. * values are wraparound (C)
  539. * conclusion: its fast, but introduces ugly horizontal patterns if there is a continious gradient
  540. 0 8 16 24
  541. x = 8
  542. x/2 = 4
  543. x/8 = 1
  544. 1 12 12 23
  545. */
  546. static inline void vertRK1Filter(uint8_t *src, int stride, int QP)
  547. {
  548. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  549. src+= stride*3;
  550. // FIXME rounding
  551. asm volatile(
  552. "pxor %%mm7, %%mm7 \n\t" // 0
  553. "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
  554. "leal (%0, %1), %%eax \n\t"
  555. "leal (%%eax, %1, 4), %%ebx \n\t"
  556. // 0 1 2 3 4 5 6 7 8 9
  557. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  558. "movq pQPb, %%mm0 \n\t" // QP,..., QP
  559. "movq %%mm0, %%mm1 \n\t" // QP,..., QP
  560. "paddusb b02, %%mm0 \n\t"
  561. "psrlw $2, %%mm0 \n\t"
  562. "pand b3F, %%mm0 \n\t" // QP/4,..., QP/4
  563. "paddusb %%mm1, %%mm0 \n\t" // QP*1.25 ...
  564. "movq (%0, %1, 4), %%mm2 \n\t" // line 4
  565. "movq (%%ebx), %%mm3 \n\t" // line 5
  566. "movq %%mm2, %%mm4 \n\t" // line 4
  567. "pcmpeqb %%mm5, %%mm5 \n\t" // -1
  568. "pxor %%mm2, %%mm5 \n\t" // -line 4 - 1
  569. PAVGB(%%mm3, %%mm5)
  570. "paddb %%mm6, %%mm5 \n\t" // (l5-l4)/2
  571. "psubusb %%mm3, %%mm4 \n\t"
  572. "psubusb %%mm2, %%mm3 \n\t"
  573. "por %%mm3, %%mm4 \n\t" // |l4 - l5|
  574. "psubusb %%mm0, %%mm4 \n\t"
  575. "pcmpeqb %%mm7, %%mm4 \n\t"
  576. "pand %%mm4, %%mm5 \n\t" // d/2
  577. // "paddb %%mm6, %%mm2 \n\t" // line 4 + 0x80
  578. "paddb %%mm5, %%mm2 \n\t"
  579. // "psubb %%mm6, %%mm2 \n\t"
  580. "movq %%mm2, (%0,%1, 4) \n\t"
  581. "movq (%%ebx), %%mm2 \n\t"
  582. // "paddb %%mm6, %%mm2 \n\t" // line 5 + 0x80
  583. "psubb %%mm5, %%mm2 \n\t"
  584. // "psubb %%mm6, %%mm2 \n\t"
  585. "movq %%mm2, (%%ebx) \n\t"
  586. "paddb %%mm6, %%mm5 \n\t"
  587. "psrlw $2, %%mm5 \n\t"
  588. "pand b3F, %%mm5 \n\t"
  589. "psubb b20, %%mm5 \n\t" // (l5-l4)/8
  590. "movq (%%eax, %1, 2), %%mm2 \n\t"
  591. "paddb %%mm6, %%mm2 \n\t" // line 3 + 0x80
  592. "paddsb %%mm5, %%mm2 \n\t"
  593. "psubb %%mm6, %%mm2 \n\t"
  594. "movq %%mm2, (%%eax, %1, 2) \n\t"
  595. "movq (%%ebx, %1), %%mm2 \n\t"
  596. "paddb %%mm6, %%mm2 \n\t" // line 6 + 0x80
  597. "psubsb %%mm5, %%mm2 \n\t"
  598. "psubb %%mm6, %%mm2 \n\t"
  599. "movq %%mm2, (%%ebx, %1) \n\t"
  600. :
  601. : "r" (src), "r" (stride)
  602. : "%eax", "%ebx"
  603. );
  604. #else
  605. const int l1= stride;
  606. const int l2= stride + l1;
  607. const int l3= stride + l2;
  608. const int l4= stride + l3;
  609. const int l5= stride + l4;
  610. const int l6= stride + l5;
  611. // const int l7= stride + l6;
  612. // const int l8= stride + l7;
  613. // const int l9= stride + l8;
  614. int x;
  615. const int QP15= QP + (QP>>2);
  616. src+= stride*3;
  617. for(x=0; x<BLOCK_SIZE; x++)
  618. {
  619. const int v = (src[x+l5] - src[x+l4]);
  620. if(ABS(v) < QP15)
  621. {
  622. src[x+l3] +=v>>3;
  623. src[x+l4] +=v>>1;
  624. src[x+l5] -=v>>1;
  625. src[x+l6] -=v>>3;
  626. }
  627. }
  628. #endif
  629. }
  630. /**
  631. * Experimental Filter 1
  632. * will not damage linear gradients
  633. * Flat blocks should look like they where passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
  634. * can only smooth blocks at the expected locations (it cant smooth them if they did move)
  635. * MMX2 version does correct clipping C version doesnt
  636. */
  637. static inline void vertX1Filter(uint8_t *src, int stride, int QP)
  638. {
  639. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  640. src+= stride*3;
  641. asm volatile(
  642. "pxor %%mm7, %%mm7 \n\t" // 0
  643. // "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
  644. "leal (%0, %1), %%eax \n\t"
  645. "leal (%%eax, %1, 4), %%ebx \n\t"
  646. // 0 1 2 3 4 5 6 7 8 9
  647. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  648. "movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
  649. "movq (%0, %1, 4), %%mm1 \n\t" // line 4
  650. "movq %%mm1, %%mm2 \n\t" // line 4
  651. "psubusb %%mm0, %%mm1 \n\t"
  652. "psubusb %%mm2, %%mm0 \n\t"
  653. "por %%mm1, %%mm0 \n\t" // |l2 - l3|
  654. "movq (%%ebx), %%mm3 \n\t" // line 5
  655. "movq (%%ebx, %1), %%mm4 \n\t" // line 6
  656. "movq %%mm3, %%mm5 \n\t" // line 5
  657. "psubusb %%mm4, %%mm3 \n\t"
  658. "psubusb %%mm5, %%mm4 \n\t"
  659. "por %%mm4, %%mm3 \n\t" // |l5 - l6|
  660. PAVGB(%%mm3, %%mm0) // (|l2 - l3| + |l5 - l6|)/2
  661. "movq %%mm2, %%mm1 \n\t" // line 4
  662. "psubusb %%mm5, %%mm2 \n\t"
  663. "movq %%mm2, %%mm4 \n\t"
  664. "pcmpeqb %%mm7, %%mm2 \n\t" // (l4 - l5) <= 0 ? -1 : 0
  665. "psubusb %%mm1, %%mm5 \n\t"
  666. "por %%mm5, %%mm4 \n\t" // |l4 - l5|
  667. "psubusb %%mm0, %%mm4 \n\t" //d = MAX(0, |l4-l5| - (|l2-l3| + |l5-l6|)/2)
  668. "movq %%mm4, %%mm3 \n\t" // d
  669. "psubusb pQPb, %%mm4 \n\t"
  670. "pcmpeqb %%mm7, %%mm4 \n\t" // d <= QP ? -1 : 0
  671. "psubusb b01, %%mm3 \n\t"
  672. "pand %%mm4, %%mm3 \n\t" // d <= QP ? d : 0
  673. PAVGB(%%mm7, %%mm3) // d/2
  674. "movq %%mm3, %%mm1 \n\t" // d/2
  675. PAVGB(%%mm7, %%mm3) // d/4
  676. PAVGB(%%mm1, %%mm3) // 3*d/8
  677. "movq (%0, %1, 4), %%mm0 \n\t" // line 4
  678. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
  679. "psubusb %%mm3, %%mm0 \n\t"
  680. "pxor %%mm2, %%mm0 \n\t"
  681. "movq %%mm0, (%0, %1, 4) \n\t" // line 4
  682. "movq (%%ebx), %%mm0 \n\t" // line 5
  683. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
  684. "paddusb %%mm3, %%mm0 \n\t"
  685. "pxor %%mm2, %%mm0 \n\t"
  686. "movq %%mm0, (%%ebx) \n\t" // line 5
  687. PAVGB(%%mm7, %%mm1) // d/4
  688. "movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
  689. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
  690. "psubusb %%mm1, %%mm0 \n\t"
  691. "pxor %%mm2, %%mm0 \n\t"
  692. "movq %%mm0, (%%eax, %1, 2) \n\t" // line 3
  693. "movq (%%ebx, %1), %%mm0 \n\t" // line 6
  694. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
  695. "paddusb %%mm1, %%mm0 \n\t"
  696. "pxor %%mm2, %%mm0 \n\t"
  697. "movq %%mm0, (%%ebx, %1) \n\t" // line 6
  698. PAVGB(%%mm7, %%mm1) // d/8
  699. "movq (%%eax, %1), %%mm0 \n\t" // line 2
  700. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
  701. "psubusb %%mm1, %%mm0 \n\t"
  702. "pxor %%mm2, %%mm0 \n\t"
  703. "movq %%mm0, (%%eax, %1) \n\t" // line 2
  704. "movq (%%ebx, %1, 2), %%mm0 \n\t" // line 7
  705. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
  706. "paddusb %%mm1, %%mm0 \n\t"
  707. "pxor %%mm2, %%mm0 \n\t"
  708. "movq %%mm0, (%%ebx, %1, 2) \n\t" // line 7
  709. :
  710. : "r" (src), "r" (stride)
  711. : "%eax", "%ebx"
  712. );
  713. #else
  714. const int l1= stride;
  715. const int l2= stride + l1;
  716. const int l3= stride + l2;
  717. const int l4= stride + l3;
  718. const int l5= stride + l4;
  719. const int l6= stride + l5;
  720. const int l7= stride + l6;
  721. // const int l8= stride + l7;
  722. // const int l9= stride + l8;
  723. int x;
  724. src+= stride*3;
  725. for(x=0; x<BLOCK_SIZE; x++)
  726. {
  727. int a= src[l3] - src[l4];
  728. int b= src[l4] - src[l5];
  729. int c= src[l5] - src[l6];
  730. int d= ABS(b) - ((ABS(a) + ABS(c))>>1);
  731. d= MAX(d, 0);
  732. if(d < QP)
  733. {
  734. int v = d * SIGN(-b);
  735. src[l2] +=v>>3;
  736. src[l3] +=v>>2;
  737. src[l4] +=(3*v)>>3;
  738. src[l5] -=(3*v)>>3;
  739. src[l6] -=v>>2;
  740. src[l7] -=v>>3;
  741. }
  742. src++;
  743. }
  744. /*
  745. const int l1= stride;
  746. const int l2= stride + l1;
  747. const int l3= stride + l2;
  748. const int l4= stride + l3;
  749. const int l5= stride + l4;
  750. const int l6= stride + l5;
  751. const int l7= stride + l6;
  752. const int l8= stride + l7;
  753. const int l9= stride + l8;
  754. for(int x=0; x<BLOCK_SIZE; x++)
  755. {
  756. int v2= src[l2];
  757. int v3= src[l3];
  758. int v4= src[l4];
  759. int v5= src[l5];
  760. int v6= src[l6];
  761. int v7= src[l7];
  762. if(ABS(v4-v5)<QP && ABS(v4-v5) - (ABS(v3-v4) + ABS(v5-v6))>0 )
  763. {
  764. src[l3] = (6*v2 + 4*v3 + 3*v4 + 2*v5 + v6 )/16;
  765. src[l4] = (3*v2 + 3*v3 + 4*v4 + 3*v5 + 2*v6 + v7 )/16;
  766. src[l5] = (1*v2 + 2*v3 + 3*v4 + 4*v5 + 3*v6 + 3*v7)/16;
  767. src[l6] = ( 1*v3 + 2*v4 + 3*v5 + 4*v6 + 6*v7)/16;
  768. }
  769. src++;
  770. }
  771. */
  772. #endif
  773. }
  774. /**
  775. * Experimental Filter 1 (Horizontal)
  776. * will not damage linear gradients
  777. * Flat blocks should look like they where passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
  778. * can only smooth blocks at the expected locations (it cant smooth them if they did move)
  779. * MMX2 version does correct clipping C version doesnt
  780. * not identical with the vertical one
  781. */
  782. static inline void horizX1Filter(uint8_t *src, int stride, int QP)
  783. {
  784. int y;
  785. static uint64_t *lut= NULL;
  786. if(lut==NULL)
  787. {
  788. int i;
  789. lut= (uint64_t*)memalign(8, 256*8);
  790. for(i=0; i<256; i++)
  791. {
  792. int v= i < 128 ? 2*i : 2*(i-256);
  793. /*
  794. //Simulate 112242211 9-Tap filter
  795. uint64_t a= (v/16) & 0xFF;
  796. uint64_t b= (v/8) & 0xFF;
  797. uint64_t c= (v/4) & 0xFF;
  798. uint64_t d= (3*v/8) & 0xFF;
  799. */
  800. //Simulate piecewise linear interpolation
  801. uint64_t a= (v/16) & 0xFF;
  802. uint64_t b= (v*3/16) & 0xFF;
  803. uint64_t c= (v*5/16) & 0xFF;
  804. uint64_t d= (7*v/16) & 0xFF;
  805. uint64_t A= (0x100 - a)&0xFF;
  806. uint64_t B= (0x100 - b)&0xFF;
  807. uint64_t C= (0x100 - c)&0xFF;
  808. uint64_t D= (0x100 - c)&0xFF;
  809. lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
  810. (D<<24) | (C<<16) | (B<<8) | (A);
  811. //lut[i] = (v<<32) | (v<<24);
  812. }
  813. }
  814. #if 0
  815. asm volatile(
  816. "pxor %%mm7, %%mm7 \n\t" // 0
  817. // "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
  818. "leal (%0, %1), %%eax \n\t"
  819. "leal (%%eax, %1, 4), %%ebx \n\t"
  820. "movq b80, %%mm6 \n\t"
  821. "movd pQPb, %%mm5 \n\t" // QP
  822. "movq %%mm5, %%mm4 \n\t"
  823. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  824. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  825. "pxor %%mm5, %%mm5 \n\t" // 0
  826. "psubb %%mm4, %%mm5 \n\t" // -3QP
  827. "por bm11111110, %%mm5 \n\t" // ...,FF,FF,-3QP
  828. "psllq $24, %%mm5 \n\t"
  829. // 0 1 2 3 4 5 6 7 8 9
  830. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  831. #define HX1old(a) \
  832. "movd " #a ", %%mm0 \n\t"\
  833. "movd 4" #a ", %%mm1 \n\t"\
  834. "punpckldq %%mm1, %%mm0 \n\t"\
  835. "movq %%mm0, %%mm1 \n\t"\
  836. "movq %%mm0, %%mm2 \n\t"\
  837. "psrlq $8, %%mm1 \n\t"\
  838. "psubusb %%mm1, %%mm2 \n\t"\
  839. "psubusb %%mm0, %%mm1 \n\t"\
  840. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  841. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  842. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  843. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  844. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  845. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  846. "paddb %%mm5, %%mm1 \n\t"\
  847. "psubusb %%mm5, %%mm1 \n\t"\
  848. PAVGB(%%mm7, %%mm1)\
  849. "pxor %%mm2, %%mm1 \n\t"\
  850. "psubb %%mm2, %%mm1 \n\t"\
  851. "psrlq $24, %%mm1 \n\t"\
  852. "movd %%mm1, %%ecx \n\t"\
  853. "paddb %%mm6, %%mm0 \n\t"\
  854. "paddsb (%3, %%ecx, 8), %%mm0 \n\t"\
  855. "paddb %%mm6, %%mm0 \n\t"\
  856. "movq %%mm0, " #a " \n\t"\
  857. /*
  858. HX1old((%0))
  859. HX1old((%%eax))
  860. HX1old((%%eax, %1))
  861. HX1old((%%eax, %1, 2))
  862. HX1old((%0, %1, 4))
  863. HX1old((%%ebx))
  864. HX1old((%%ebx, %1))
  865. HX1old((%%ebx, %1, 2))
  866. */
  867. //FIXME add some comments, its unreadable ...
  868. #define HX1b(a, c, b, d) \
  869. "movd " #a ", %%mm0 \n\t"\
  870. "movd 4" #a ", %%mm1 \n\t"\
  871. "punpckldq %%mm1, %%mm0 \n\t"\
  872. "movd " #b ", %%mm4 \n\t"\
  873. "movq %%mm0, %%mm1 \n\t"\
  874. "movq %%mm0, %%mm2 \n\t"\
  875. "psrlq $8, %%mm1 \n\t"\
  876. "movd 4" #b ", %%mm3 \n\t"\
  877. "psubusb %%mm1, %%mm2 \n\t"\
  878. "psubusb %%mm0, %%mm1 \n\t"\
  879. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  880. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  881. "punpckldq %%mm3, %%mm4 \n\t"\
  882. "movq %%mm1, %%mm3 \n\t"\
  883. "psllq $32, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  884. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  885. "paddb %%mm6, %%mm0 \n\t"\
  886. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  887. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  888. "movq %%mm4, %%mm3 \n\t"\
  889. "paddb %%mm5, %%mm1 \n\t"\
  890. "psubusb %%mm5, %%mm1 \n\t"\
  891. "psrlq $8, %%mm3 \n\t"\
  892. PAVGB(%%mm7, %%mm1)\
  893. "pxor %%mm2, %%mm1 \n\t"\
  894. "psubb %%mm2, %%mm1 \n\t"\
  895. "movq %%mm4, %%mm2 \n\t"\
  896. "psrlq $24, %%mm1 \n\t"\
  897. "psubusb %%mm3, %%mm2 \n\t"\
  898. "movd %%mm1, %%ecx \n\t"\
  899. "psubusb %%mm4, %%mm3 \n\t"\
  900. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  901. "por %%mm2, %%mm3 \n\t" /* p´x = |px - p(x+1)| */\
  902. "paddb %%mm6, %%mm0 \n\t"\
  903. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  904. "movq %%mm3, %%mm1 \n\t"\
  905. "psllq $32, %%mm1 \n\t" /* p´5 = |p1 - p2| */\
  906. "movq %%mm0, " #a " \n\t"\
  907. PAVGB(%%mm3, %%mm1) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  908. "paddb %%mm6, %%mm4 \n\t"\
  909. "psrlq $16, %%mm1 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  910. "psubusb %%mm1, %%mm3 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  911. "paddb %%mm5, %%mm3 \n\t"\
  912. "psubusb %%mm5, %%mm3 \n\t"\
  913. PAVGB(%%mm7, %%mm3)\
  914. "pxor %%mm2, %%mm3 \n\t"\
  915. "psubb %%mm2, %%mm3 \n\t"\
  916. "psrlq $24, %%mm3 \n\t"\
  917. "movd " #c ", %%mm0 \n\t"\
  918. "movd 4" #c ", %%mm1 \n\t"\
  919. "punpckldq %%mm1, %%mm0 \n\t"\
  920. "paddb %%mm6, %%mm0 \n\t"\
  921. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  922. "paddb %%mm6, %%mm0 \n\t"\
  923. "movq %%mm0, " #c " \n\t"\
  924. "movd %%mm3, %%ecx \n\t"\
  925. "movd " #d ", %%mm0 \n\t"\
  926. "paddsb (%2, %%ecx, 8), %%mm4 \n\t"\
  927. "movd 4" #d ", %%mm1 \n\t"\
  928. "paddb %%mm6, %%mm4 \n\t"\
  929. "punpckldq %%mm1, %%mm0 \n\t"\
  930. "movq %%mm4, " #b " \n\t"\
  931. "paddb %%mm6, %%mm0 \n\t"\
  932. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  933. "paddb %%mm6, %%mm0 \n\t"\
  934. "movq %%mm0, " #d " \n\t"\
  935. HX1b((%0),(%%eax),(%%eax, %1),(%%eax, %1, 2))
  936. HX1b((%0, %1, 4),(%%ebx),(%%ebx, %1),(%%ebx, %1, 2))
  937. :
  938. : "r" (src), "r" (stride), "r" (lut)
  939. : "%eax", "%ebx", "%ecx"
  940. );
  941. #else
  942. //FIXME (has little in common with the mmx2 version)
  943. for(y=0; y<BLOCK_SIZE; y++)
  944. {
  945. int a= src[1] - src[2];
  946. int b= src[3] - src[4];
  947. int c= src[5] - src[6];
  948. int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
  949. if(d < QP)
  950. {
  951. int v = d * SIGN(-b);
  952. src[1] +=v/8;
  953. src[2] +=v/4;
  954. src[3] +=3*v/8;
  955. src[4] -=3*v/8;
  956. src[5] -=v/4;
  957. src[6] -=v/8;
  958. }
  959. src+=stride;
  960. }
  961. #endif
  962. }
/**
 * Default vertical deblocking filter.
 * Filters across the horizontal edge between two vertically adjacent
 * blocks.  Only the two lines on either side of the edge are changed,
 * and only when the local "middle energy" is below 8*QP, i.e. the
 * discontinuity is small enough to be a blocking artifact rather than
 * a real image edge.
 *
 * src    points above the edge; the MMX path advances it by 4 lines,
 *        the C path by 3 lines, so both end up modifying the same two
 *        lines relative to the caller's pointer
 * stride distance in bytes between two consecutive lines
 * QP     quantization parameter of the block
 */
static inline void doVertDefFilter(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
	src+= stride*4;
	//FIXME try pmul for *5 stuff
//	src[0]=0;
	// In the comments below L0..L7 / H0..H7 are the low / high 4-pixel
	// halves of lines 0..7 of the 8-line window, unpacked to 16bit
	// words so that the 5x and 2x weighted sums cannot overflow.
	// temp0..temp3 are external 8-byte scratch buffers.
	// NOTE(review): the asm stores to temp0..temp3 and to the pixel
	// lines but does not list "memory" in the clobber list -
	// presumably it relies on "volatile"; confirm before reordering
	// surrounding memory accesses.
	asm volatile(
		"pxor %%mm7, %%mm7				\n\t"
		"leal (%0, %1), %%eax				\n\t"
		"leal (%%eax, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7
//	%0	%0+%1	%0+2%1	eax+2%1	%0+4%1	eax+4%1	ebx+%1	ebx+2%1
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1
		"movq (%0), %%mm0				\n\t"
		"movq %%mm0, %%mm1				\n\t"
		"punpcklbw %%mm7, %%mm0				\n\t" // low part of line 0
		"punpckhbw %%mm7, %%mm1				\n\t" // high part of line 0
		"movq (%%eax), %%mm2				\n\t"
		"movq %%mm2, %%mm3				\n\t"
		"punpcklbw %%mm7, %%mm2				\n\t" // low part of line 1
		"punpckhbw %%mm7, %%mm3				\n\t" // high part of line 1
		"movq (%%eax, %1), %%mm4			\n\t"
		"movq %%mm4, %%mm5				\n\t"
		"punpcklbw %%mm7, %%mm4				\n\t" // low part of line 2
		"punpckhbw %%mm7, %%mm5				\n\t" // high part of line 2
		"paddw %%mm0, %%mm0				\n\t" // 2L0
		"paddw %%mm1, %%mm1				\n\t" // 2H0
		"psubw %%mm4, %%mm2				\n\t" // L1 - L2
		"psubw %%mm5, %%mm3				\n\t" // H1 - H2
		"psubw %%mm2, %%mm0				\n\t" // 2L0 - L1 + L2
		"psubw %%mm3, %%mm1				\n\t" // 2H0 - H1 + H2
		"psllw $2, %%mm2				\n\t" // 4L1 - 4L2
		"psllw $2, %%mm3				\n\t" // 4H1 - 4H2
		"psubw %%mm2, %%mm0				\n\t" // 2L0 - 5L1 + 5L2
		"psubw %%mm3, %%mm1				\n\t" // 2H0 - 5H1 + 5H2
		"movq (%%eax, %1, 2), %%mm2			\n\t"
		"movq %%mm2, %%mm3				\n\t"
		"punpcklbw %%mm7, %%mm2				\n\t" // L3
		"punpckhbw %%mm7, %%mm3				\n\t" // H3
		"psubw %%mm2, %%mm0				\n\t" // 2L0 - 5L1 + 5L2 - L3
		"psubw %%mm3, %%mm1				\n\t" // 2H0 - 5H1 + 5H2 - H3
		"psubw %%mm2, %%mm0				\n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"psubw %%mm3, %%mm1				\n\t" // 2H0 - 5H1 + 5H2 - 2H3
		"movq %%mm0, temp0				\n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq %%mm1, temp1				\n\t" // 2H0 - 5H1 + 5H2 - 2H3
		"movq (%0, %1, 4), %%mm0			\n\t"
		"movq %%mm0, %%mm1				\n\t"
		"punpcklbw %%mm7, %%mm0				\n\t" // L4
		"punpckhbw %%mm7, %%mm1				\n\t" // H4
		"psubw %%mm0, %%mm2				\n\t" // L3 - L4
		"psubw %%mm1, %%mm3				\n\t" // H3 - H4
		"movq %%mm2, temp2				\n\t" // L3 - L4
		"movq %%mm3, temp3				\n\t" // H3 - H4
		"paddw %%mm4, %%mm4				\n\t" // 2L2
		"paddw %%mm5, %%mm5				\n\t" // 2H2
		"psubw %%mm2, %%mm4				\n\t" // 2L2 - L3 + L4
		"psubw %%mm3, %%mm5				\n\t" // 2H2 - H3 + H4
		"psllw $2, %%mm2				\n\t" // 4L3 - 4L4
		"psllw $2, %%mm3				\n\t" // 4H3 - 4H4
		"psubw %%mm2, %%mm4				\n\t" // 2L2 - 5L3 + 5L4
		"psubw %%mm3, %%mm5				\n\t" // 2H2 - 5H3 + 5H4
//50 opcodes so far
		"movq (%%ebx), %%mm2				\n\t"
		"movq %%mm2, %%mm3				\n\t"
		"punpcklbw %%mm7, %%mm2				\n\t" // L5
		"punpckhbw %%mm7, %%mm3				\n\t" // H5
		"psubw %%mm2, %%mm4				\n\t" // 2L2 - 5L3 + 5L4 - L5
		"psubw %%mm3, %%mm5				\n\t" // 2H2 - 5H3 + 5H4 - H5
		"psubw %%mm2, %%mm4				\n\t" // 2L2 - 5L3 + 5L4 - 2L5
		"psubw %%mm3, %%mm5				\n\t" // 2H2 - 5H3 + 5H4 - 2H5
		"movq (%%ebx, %1), %%mm6			\n\t"
		"punpcklbw %%mm7, %%mm6				\n\t" // L6
		"psubw %%mm6, %%mm2				\n\t" // L5 - L6
		"movq (%%ebx, %1), %%mm6			\n\t"
		"punpckhbw %%mm7, %%mm6				\n\t" // H6
		"psubw %%mm6, %%mm3				\n\t" // H5 - H6
		"paddw %%mm0, %%mm0				\n\t" // 2L4
		"paddw %%mm1, %%mm1				\n\t" // 2H4
		"psubw %%mm2, %%mm0				\n\t" // 2L4 - L5 + L6
		"psubw %%mm3, %%mm1				\n\t" // 2H4 - H5 + H6
		"psllw $2, %%mm2				\n\t" // 4L5 - 4L6
		"psllw $2, %%mm3				\n\t" // 4H5 - 4H6
		"psubw %%mm2, %%mm0				\n\t" // 2L4 - 5L5 + 5L6
		"psubw %%mm3, %%mm1				\n\t" // 2H4 - 5H5 + 5H6
		"movq (%%ebx, %1, 2), %%mm2			\n\t"
		"movq %%mm2, %%mm3				\n\t"
		"punpcklbw %%mm7, %%mm2				\n\t" // L7
		"punpckhbw %%mm7, %%mm3				\n\t" // H7
		"paddw %%mm2, %%mm2				\n\t" // 2L7
		"paddw %%mm3, %%mm3				\n\t" // 2H7
		"psubw %%mm2, %%mm0				\n\t" // 2L4 - 5L5 + 5L6 - 2L7
		"psubw %%mm3, %%mm1				\n\t" // 2H4 - 5H5 + 5H6 - 2H7
		"movq temp0, %%mm2				\n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq temp1, %%mm3				\n\t" // 2H0 - 5H1 + 5H2 - 2H3
		// absolute values: pmaxsw with the negated value on MMX2,
		// sign-mask + pxor + psubw on plain MMX
#ifdef HAVE_MMX2
		"movq %%mm7, %%mm6				\n\t" // 0
		"psubw %%mm0, %%mm6				\n\t"
		"pmaxsw %%mm6, %%mm0				\n\t" // |2L4 - 5L5 + 5L6 - 2L7|
		"movq %%mm7, %%mm6				\n\t" // 0
		"psubw %%mm1, %%mm6				\n\t"
		"pmaxsw %%mm6, %%mm1				\n\t" // |2H4 - 5H5 + 5H6 - 2H7|
		"movq %%mm7, %%mm6				\n\t" // 0
		"psubw %%mm2, %%mm6				\n\t"
		"pmaxsw %%mm6, %%mm2				\n\t" // |2L0 - 5L1 + 5L2 - 2L3|
		"movq %%mm7, %%mm6				\n\t" // 0
		"psubw %%mm3, %%mm6				\n\t"
		"pmaxsw %%mm6, %%mm3				\n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#else
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm0, %%mm6				\n\t"
		"pxor %%mm6, %%mm0				\n\t"
		"psubw %%mm6, %%mm0				\n\t" // |2L4 - 5L5 + 5L6 - 2L7|
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm1, %%mm6				\n\t"
		"pxor %%mm6, %%mm1				\n\t"
		"psubw %%mm6, %%mm1				\n\t" // |2H4 - 5H5 + 5H6 - 2H7|
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm2, %%mm6				\n\t"
		"pxor %%mm6, %%mm2				\n\t"
		"psubw %%mm6, %%mm2				\n\t" // |2L0 - 5L1 + 5L2 - 2L3|
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm3, %%mm6				\n\t"
		"pxor %%mm6, %%mm3				\n\t"
		"psubw %%mm6, %%mm3				\n\t" // |2H0 - 5H1 + 5H2 - 2H3|
#endif
		// minimum of the left/right neighbour energies
#ifdef HAVE_MMX2
		"pminsw %%mm2, %%mm0				\n\t"
		"pminsw %%mm3, %%mm1				\n\t"
#else
		"movq %%mm0, %%mm6				\n\t"
		"psubusw %%mm2, %%mm6				\n\t"
		"psubw %%mm6, %%mm0				\n\t"
		"movq %%mm1, %%mm6				\n\t"
		"psubusw %%mm3, %%mm6				\n\t"
		"psubw %%mm6, %%mm1				\n\t"
#endif
		"movq %%mm7, %%mm6				\n\t" // 0
		"pcmpgtw %%mm4, %%mm6				\n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
		"pxor %%mm6, %%mm4				\n\t"
		"psubw %%mm6, %%mm4				\n\t" // |2L2 - 5L3 + 5L4 - 2L5|
		"pcmpgtw %%mm5, %%mm7				\n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
		"pxor %%mm7, %%mm5				\n\t"
		"psubw %%mm7, %%mm5				\n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes
		"movd %2, %%mm2					\n\t" // QP
		"punpcklwd %%mm2, %%mm2				\n\t"
		"punpcklwd %%mm2, %%mm2				\n\t"
		"psllw $3, %%mm2				\n\t" // 8QP
		"movq %%mm2, %%mm3				\n\t" // 8QP
		"pcmpgtw %%mm4, %%mm2				\n\t"
		"pcmpgtw %%mm5, %%mm3				\n\t"
		"pand %%mm2, %%mm4				\n\t"
		"pand %%mm3, %%mm5				\n\t"
		"psubusw %%mm0, %%mm4				\n\t" // hd
		"psubusw %%mm1, %%mm5				\n\t" // ld
		"movq w05, %%mm2				\n\t" // 5
		"pmullw %%mm2, %%mm4				\n\t"
		"pmullw %%mm2, %%mm5				\n\t"
		"movq w20, %%mm2				\n\t" // 32
		"paddw %%mm2, %%mm4				\n\t"
		"paddw %%mm2, %%mm5				\n\t"
		"psrlw $6, %%mm4				\n\t"
		"psrlw $6, %%mm5				\n\t"
/*
		"movq w06, %%mm2				\n\t" // 6
		"paddw %%mm2, %%mm4				\n\t"
		"paddw %%mm2, %%mm5				\n\t"
		"movq w1400, %%mm2				\n\t" // 1400h = 5120 = 5/64*2^16
//FIXME if *5/64 is supposed to be /13 then we should use 5041 instead of 5120
		"pmulhw %%mm2, %%mm4				\n\t" // hd/13
		"pmulhw %%mm2, %%mm5				\n\t" // ld/13
*/
		"movq temp2, %%mm0				\n\t" // L3 - L4
		"movq temp3, %%mm1				\n\t" // H3 - H4
		"pxor %%mm2, %%mm2				\n\t"
		"pxor %%mm3, %%mm3				\n\t"
		"pcmpgtw %%mm0, %%mm2				\n\t" // sign (L3-L4)
		"pcmpgtw %%mm1, %%mm3				\n\t" // sign (H3-H4)
		"pxor %%mm2, %%mm0				\n\t"
		"pxor %%mm3, %%mm1				\n\t"
		"psubw %%mm2, %%mm0				\n\t" // |L3-L4|
		"psubw %%mm3, %%mm1				\n\t" // |H3-H4|
		"psrlw $1, %%mm0				\n\t" // |L3 - L4|/2
		"psrlw $1, %%mm1				\n\t" // |H3 - H4|/2
		"pxor %%mm6, %%mm2				\n\t"
		"pxor %%mm7, %%mm3				\n\t"
		"pand %%mm2, %%mm4				\n\t"
		"pand %%mm3, %%mm5				\n\t"
		// clip the correction at |L3-L4|/2 resp. |H3-H4|/2
#ifdef HAVE_MMX2
		"pminsw %%mm0, %%mm4				\n\t"
		"pminsw %%mm1, %%mm5				\n\t"
#else
		"movq %%mm4, %%mm2				\n\t"
		"psubusw %%mm0, %%mm2				\n\t"
		"psubw %%mm2, %%mm4				\n\t"
		"movq %%mm5, %%mm2				\n\t"
		"psubusw %%mm1, %%mm2				\n\t"
		"psubw %%mm2, %%mm5				\n\t"
#endif
		"pxor %%mm6, %%mm4				\n\t"
		"pxor %%mm7, %%mm5				\n\t"
		"psubw %%mm6, %%mm4				\n\t"
		"psubw %%mm7, %%mm5				\n\t"
		"packsswb %%mm5, %%mm4				\n\t"
		// apply the signed correction to the two edge lines
		"movq (%%eax, %1, 2), %%mm0			\n\t"
		"paddb %%mm4, %%mm0				\n\t"
		"movq %%mm0, (%%eax, %1, 2)			\n\t"
		"movq (%0, %1, 4), %%mm0			\n\t"
		"psubb %%mm4, %%mm0				\n\t"
		"movq %%mm0, (%0, %1, 4)			\n\t"
		:
		: "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
	// l1..l8 are byte offsets of lines 1..8 below src
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
//	const int l9= stride + l8;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		// second-derivative-like energy across the edge (between l4 and l5)
		const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
		if(ABS(middleEnergy) < 8*QP)
		{
			const int q=(src[l4] - src[l5])/2;
			const int leftEnergy=  5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
			const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
			d= MAX(d, 0);
			d= (5*d + 32) >> 6;	// correction ~ d*5/64, rounded
			d*= SIGN(-middleEnergy);
			// clip d so the correction cannot overshoot past q
			if(q>0)
			{
				d= d<0 ? 0 : d;
				d= d>q ? q : d;
			}
			else
			{
				d= d>0 ? 0 : d;
				d= d<q ? q : d;
			}
			src[l4]-= d;
			src[l5]+= d;
		}
		src++;
	}
#endif
}
  1218. //FIXME? |255-0| = 1
  1219. /**
  1220. * Check if the given 8x8 Block is mostly "flat"
  1221. */
  1222. static inline int isHorizDC(uint8_t src[], int stride)
  1223. {
  1224. // src++;
  1225. int numEq= 0;
  1226. #if 0
  1227. asm volatile (
  1228. // "int $3 \n\t"
  1229. "leal (%1, %2), %%ecx \n\t"
  1230. "leal (%%ecx, %2, 4), %%ebx \n\t"
  1231. // 0 1 2 3 4 5 6 7 8 9
  1232. // %1 ecx ecx+%2 ecx+2%2 %1+4%2 ebx ebx+%2 ebx+2%2 %1+8%2 ebx+4%2
  1233. "movq b7E, %%mm7 \n\t" // mm7 = 0x7F
  1234. "movq b7C, %%mm6 \n\t" // mm6 = 0x7D
  1235. "pxor %%mm0, %%mm0 \n\t"
  1236. "movl %1, %%eax \n\t"
  1237. "andl $0x1F, %%eax \n\t"
  1238. "cmpl $24, %%eax \n\t"
  1239. "leal tempBlock, %%eax \n\t"
  1240. "jb 1f \n\t"
  1241. #define HDC_CHECK_AND_CPY(src, dst) \
  1242. "movd " #src ", %%mm2 \n\t"\
  1243. "punpckldq 4" #src ", %%mm2 \n\t" /* (%1) */\
  1244. "movq %%mm2, %%mm1 \n\t"\
  1245. "psrlq $8, %%mm2 \n\t"\
  1246. "psubb %%mm1, %%mm2 \n\t"\
  1247. "paddb %%mm7, %%mm2 \n\t"\
  1248. "pcmpgtb %%mm6, %%mm2 \n\t"\
  1249. "paddb %%mm2, %%mm0 \n\t"\
  1250. "movq %%mm1," #dst "(%%eax) \n\t"
  1251. HDC_CHECK_AND_CPY((%1),0)
  1252. HDC_CHECK_AND_CPY((%%ecx),8)
  1253. HDC_CHECK_AND_CPY((%%ecx, %2),16)
  1254. HDC_CHECK_AND_CPY((%%ecx, %2, 2),24)
  1255. HDC_CHECK_AND_CPY((%1, %2, 4),32)
  1256. HDC_CHECK_AND_CPY((%%ebx),40)
  1257. HDC_CHECK_AND_CPY((%%ebx, %2),48)
  1258. HDC_CHECK_AND_CPY((%%ebx, %2, 2),56)
  1259. "jmp 2f \n\t"
  1260. "1: \n\t"
  1261. // src does not cross a 32 byte cache line so dont waste time with alignment
  1262. #define HDC_CHECK_AND_CPY2(src, dst) \
  1263. "movq " #src ", %%mm2 \n\t"\
  1264. "movq " #src ", %%mm1 \n\t"\
  1265. "psrlq $8, %%mm2 \n\t"\
  1266. "psubb %%mm1, %%mm2 \n\t"\
  1267. "paddb %%mm7, %%mm2 \n\t"\
  1268. "pcmpgtb %%mm6, %%mm2 \n\t"\
  1269. "paddb %%mm2, %%mm0 \n\t"\
  1270. "movq %%mm1," #dst "(%%eax) \n\t"
  1271. HDC_CHECK_AND_CPY2((%1),0)
  1272. HDC_CHECK_AND_CPY2((%%ecx),8)
  1273. HDC_CHECK_AND_CPY2((%%ecx, %2),16)
  1274. HDC_CHECK_AND_CPY2((%%ecx, %2, 2),24)
  1275. HDC_CHECK_AND_CPY2((%1, %2, 4),32)
  1276. HDC_CHECK_AND_CPY2((%%ebx),40)
  1277. HDC_CHECK_AND_CPY2((%%ebx, %2),48)
  1278. HDC_CHECK_AND_CPY2((%%ebx, %2, 2),56)
  1279. "2: \n\t"
  1280. "psllq $8, %%mm0 \n\t" // remove dummy value
  1281. "movq %%mm0, %%mm1 \n\t"
  1282. "psrlw $8, %%mm0 \n\t"
  1283. "paddb %%mm1, %%mm0 \n\t"
  1284. "movq %%mm0, %%mm1 \n\t"
  1285. "psrlq $16, %%mm0 \n\t"
  1286. "paddb %%mm1, %%mm0 \n\t"
  1287. "movq %%mm0, %%mm1 \n\t"
  1288. "psrlq $32, %%mm0 \n\t"
  1289. "paddb %%mm1, %%mm0 \n\t"
  1290. "movd %%mm0, %0 \n\t"
  1291. : "=r" (numEq)
  1292. : "r" (src), "r" (stride)
  1293. : "%eax", "%ebx", "%ecx"
  1294. );
  1295. // printf("%d\n", numEq);
  1296. numEq= (256 - numEq) &0xFF;
  1297. #else
  1298. int y;
  1299. for(y=0; y<BLOCK_SIZE; y++)
  1300. {
  1301. if(((src[0] - src[1] + 1) & 0xFFFF) < 3) numEq++;
  1302. if(((src[1] - src[2] + 1) & 0xFFFF) < 3) numEq++;
  1303. if(((src[2] - src[3] + 1) & 0xFFFF) < 3) numEq++;
  1304. if(((src[3] - src[4] + 1) & 0xFFFF) < 3) numEq++;
  1305. if(((src[4] - src[5] + 1) & 0xFFFF) < 3) numEq++;
  1306. if(((src[5] - src[6] + 1) & 0xFFFF) < 3) numEq++;
  1307. if(((src[6] - src[7] + 1) & 0xFFFF) < 3) numEq++;
  1308. src+= stride;
  1309. }
  1310. #endif
  1311. /* if(abs(numEq - asmEq) > 0)
  1312. {
  1313. // printf("\nasm:%d c:%d\n", asmEq, numEq);
  1314. for(int y=0; y<8; y++)
  1315. {
  1316. for(int x=0; x<8; x++)
  1317. {
  1318. printf("%d ", src[x + y*stride]);
  1319. }
  1320. printf("\n");
  1321. }
  1322. }
  1323. */
  1324. // printf("%d\n", numEq);
  1325. return numEq > hFlatnessThreshold;
  1326. }
  1327. static inline int isHorizMinMaxOk(uint8_t src[], int stride, int QP)
  1328. {
  1329. if(abs(src[0] - src[7]) > 2*QP) return 0;
  1330. return 1;
  1331. }
  1332. static inline void doHorizDefFilter(uint8_t dst[], int stride, int QP)
  1333. {
  1334. #if 0
  1335. asm volatile(
  1336. "leal (%0, %1), %%ecx \n\t"
  1337. "leal (%%ecx, %1, 4), %%ebx \n\t"
  1338. // 0 1 2 3 4 5 6 7 8 9
  1339. // %0 ecx ecx+%1 ecx+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1340. "pxor %%mm7, %%mm7 \n\t"
  1341. "movq bm00001000, %%mm6 \n\t"
  1342. "movd %2, %%mm5 \n\t" // QP
  1343. "movq %%mm5, %%mm4 \n\t"
  1344. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  1345. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  1346. "psllq $24, %%mm4 \n\t"
  1347. "pxor %%mm5, %%mm5 \n\t" // 0
  1348. "psubb %%mm4, %%mm5 \n\t" // -QP
  1349. "leal tempBlock, %%eax \n\t"
  1350. //FIXME? "unroll by 2" and mix
  1351. #ifdef HAVE_MMX2
  1352. #define HDF(src, dst) \
  1353. "movq " #src "(%%eax), %%mm0 \n\t"\
  1354. "movq " #src "(%%eax), %%mm1 \n\t"\
  1355. "movq " #src "(%%eax), %%mm2 \n\t"\
  1356. "psrlq $8, %%mm1 \n\t"\
  1357. "psubusb %%mm1, %%mm2 \n\t"\
  1358. "psubusb %%mm0, %%mm1 \n\t"\
  1359. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1360. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1361. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  1362. "pminub %%mm1, %%mm3 \n\t" /* p´5 = min(|p2-p1|, |p6-p5|)*/\
  1363. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1364. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5-p6|) */\
  1365. "paddb %%mm5, %%mm1 \n\t"\
  1366. "psubusb %%mm5, %%mm1 \n\t"\
  1367. "psrlw $2, %%mm1 \n\t"\
  1368. "pxor %%mm2, %%mm1 \n\t"\
  1369. "psubb %%mm2, %%mm1 \n\t"\
  1370. "pand %%mm6, %%mm1 \n\t"\
  1371. "psubb %%mm1, %%mm0 \n\t"\
  1372. "psllq $8, %%mm1 \n\t"\
  1373. "paddb %%mm1, %%mm0 \n\t"\
  1374. "movd %%mm0, " #dst" \n\t"\
  1375. "psrlq $32, %%mm0 \n\t"\
  1376. "movd %%mm0, 4" #dst" \n\t"
  1377. #else
  1378. #define HDF(src, dst)\
  1379. "movq " #src "(%%eax), %%mm0 \n\t"\
  1380. "movq %%mm0, %%mm1 \n\t"\
  1381. "movq %%mm0, %%mm2 \n\t"\
  1382. "psrlq $8, %%mm1 \n\t"\
  1383. "psubusb %%mm1, %%mm2 \n\t"\
  1384. "psubusb %%mm0, %%mm1 \n\t"\
  1385. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1386. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1387. "movq %%mm1, %%mm3 \n\t"\
  1388. "psllq $32, %%mm3 \n\t"\
  1389. "movq %%mm3, %%mm4 \n\t"\
  1390. "psubusb %%mm1, %%mm4 \n\t"\
  1391. "psubb %%mm4, %%mm3 \n\t"\
  1392. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1393. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5,ü6|) */\
  1394. "paddb %%mm5, %%mm1 \n\t"\
  1395. "psubusb %%mm5, %%mm1 \n\t"\
  1396. "psrlw $2, %%mm1 \n\t"\
  1397. "pxor %%mm2, %%mm1 \n\t"\
  1398. "psubb %%mm2, %%mm1 \n\t"\
  1399. "pand %%mm6, %%mm1 \n\t"\
  1400. "psubb %%mm1, %%mm0 \n\t"\
  1401. "psllq $8, %%mm1 \n\t"\
  1402. "paddb %%mm1, %%mm0 \n\t"\
  1403. "movd %%mm0, " #dst " \n\t"\
  1404. "psrlq $32, %%mm0 \n\t"\
  1405. "movd %%mm0, 4" #dst " \n\t"
  1406. #endif
  1407. HDF(0,(%0))
  1408. HDF(8,(%%ecx))
  1409. HDF(16,(%%ecx, %1))
  1410. HDF(24,(%%ecx, %1, 2))
  1411. HDF(32,(%0, %1, 4))
  1412. HDF(40,(%%ebx))
  1413. HDF(48,(%%ebx, %1))
  1414. HDF(56,(%%ebx, %1, 2))
  1415. :
  1416. : "r" (dst), "r" (stride), "r" (QP)
  1417. : "%eax", "%ebx", "%ecx"
  1418. );
  1419. #else
  1420. int y;
  1421. for(y=0; y<BLOCK_SIZE; y++)
  1422. {
  1423. const int middleEnergy= 5*(dst[4] - dst[5]) + 2*(dst[2] - dst[5]);
  1424. if(ABS(middleEnergy) < 8*QP)
  1425. {
  1426. const int q=(dst[3] - dst[4])/2;
  1427. const int leftEnergy= 5*(dst[2] - dst[1]) + 2*(dst[0] - dst[3]);
  1428. const int rightEnergy= 5*(dst[6] - dst[5]) + 2*(dst[4] - dst[7]);
  1429. int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
  1430. d= MAX(d, 0);
  1431. d= (5*d + 32) >> 6;
  1432. d*= SIGN(-middleEnergy);
  1433. if(q>0)
  1434. {
  1435. d= d<0 ? 0 : d;
  1436. d= d>q ? q : d;
  1437. }
  1438. else
  1439. {
  1440. d= d>0 ? 0 : d;
  1441. d= d<q ? q : d;
  1442. }
  1443. dst[3]-= d;
  1444. dst[4]+= d;
  1445. }
  1446. dst+= stride;
  1447. }
  1448. #endif
  1449. }
/**
 * Do a horizontal low pass filter on the 10x8 block (dst points to middle 8x8 Block)
 * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
 * using the 7-Tap Filter (2,2,2,4,2,2,2)/16 (MMX2/3DNOW version)
 * The first/last pixels outside the block (dst[-1], dst[8]) are only
 * used when they differ from the edge pixel by less than QP.
 */
static inline void doHorizLowPass(uint8_t dst[], int stride, int QP)
{
#if 0
	asm volatile(
		"leal (%0, %1), %%ecx				\n\t"
		"leal (%%ecx, %1, 4), %%ebx			\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	ecx	ecx+%1	ecx+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		"pxor %%mm7, %%mm7				\n\t"
		"leal tempBlock, %%eax				\n\t"
/*
#define HLP1	"movq (%0), %%mm0				\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"psllq $8, %%mm0				\n\t"\
		PAVGB(%%mm1, %%mm0)\
		"psrlw $8, %%mm0				\n\t"\
		"pxor %%mm1, %%mm1				\n\t"\
		"packuswb %%mm1, %%mm0				\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"movq %%mm0, %%mm2				\n\t"\
		"psllq $32, %%mm0				\n\t"\
		"paddb %%mm0, %%mm1				\n\t"\
		"psllq $16, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm0)\
		"movq %%mm0, %%mm3				\n\t"\
		"pand bm11001100, %%mm0				\n\t"\
		"paddusb %%mm0, %%mm3				\n\t"\
		"psrlq $8, %%mm3				\n\t"\
		PAVGB(%%mm1, %%mm4)\
		PAVGB(%%mm3, %%mm2)\
		"psrlq $16, %%mm2				\n\t"\
		"punpcklbw %%mm2, %%mm2				\n\t"\
		"movq %%mm2, (%0)				\n\t"\

#define HLP2	"movq (%0), %%mm0				\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"psllq $8, %%mm0				\n\t"\
		PAVGB(%%mm1, %%mm0)\
		"psrlw $8, %%mm0				\n\t"\
		"pxor %%mm1, %%mm1				\n\t"\
		"packuswb %%mm1, %%mm0				\n\t"\
		"movq %%mm0, %%mm2				\n\t"\
		"psllq $32, %%mm0				\n\t"\
		"psllq $16, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm0)\
		"movq %%mm0, %%mm3				\n\t"\
		"pand bm11001100, %%mm0				\n\t"\
		"paddusb %%mm0, %%mm3				\n\t"\
		"psrlq $8, %%mm3				\n\t"\
		PAVGB(%%mm3, %%mm2)\
		"psrlq $16, %%mm2				\n\t"\
		"punpcklbw %%mm2, %%mm2				\n\t"\
		"movq %%mm2, (%0)				\n\t"\
*/
// approximately a 7-Tap Filter with Vector (1,2,3,4,3,2,1)/16
/*
 Implemented	Exact 7-Tap
 9421		A321
 36421		64321
 334321		=
 1234321	=
 1234321	=
 123433		=
 12463		12346
 1249		123A
*/
#ifdef HAVE_MMX2
#define HLP3(i)	"movq " #i "(%%eax), %%mm0			\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"movq %%mm0, %%mm2				\n\t"\
		"movq %%mm0, %%mm3				\n\t"\
		"movq %%mm0, %%mm4				\n\t"\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"pand bm00000001, %%mm3				\n\t"\
		"pand bm10000000, %%mm4				\n\t"\
		"por %%mm3, %%mm1				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm1)\
		PAVGB(%%mm1, %%mm0)\
\
		"pshufw $0xF9, %%mm0, %%mm3			\n\t"\
		"pshufw $0x90, %%mm0, %%mm4			\n\t"\
		PAVGB(%%mm3, %%mm4)\
		PAVGB(%%mm4, %%mm0)\
		"movd %%mm0, (%0)				\n\t"\
		"psrlq $32, %%mm0				\n\t"\
		"movd %%mm0, 4(%0)				\n\t"
#else
#define HLP3(i)	"movq " #i "(%%eax), %%mm0			\n\t"\
		"movq %%mm0, %%mm1				\n\t"\
		"movq %%mm0, %%mm2				\n\t"\
		"movd -4(%0), %%mm3				\n\t" /*0001000*/\
		"movd 8(%0), %%mm4				\n\t" /*0001000*/\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"psrlq $24, %%mm3				\n\t"\
		"psllq $56, %%mm4				\n\t"\
		"por %%mm3, %%mm1				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm1)\
		PAVGB(%%mm1, %%mm0)\
\
		"movq %%mm0, %%mm3				\n\t"\
		"movq %%mm0, %%mm4				\n\t"\
		"movq %%mm0, %%mm5				\n\t"\
		"psrlq $16, %%mm3				\n\t"\
		"psllq $16, %%mm4				\n\t"\
		"pand bm11000000, %%mm5				\n\t"\
		"por %%mm5, %%mm3				\n\t"\
		"movq %%mm0, %%mm5				\n\t"\
		"pand bm00000011, %%mm5				\n\t"\
		"por %%mm5, %%mm4				\n\t"\
		PAVGB(%%mm3, %%mm4)\
		PAVGB(%%mm4, %%mm0)\
		"movd %%mm0, (%0)				\n\t"\
		"psrlq $32, %%mm0				\n\t"\
		"movd %%mm0, 4(%0)				\n\t"
#endif
/* uses the 7-Tap Filter: 1112111 */
#define NEW_HLP(src, dst)\
		"movq " #src "(%%eax), %%mm1		\n\t"\
		"movq " #src "(%%eax), %%mm2		\n\t"\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"movd -4" #dst ", %%mm3				\n\t" /*0001000*/\
		"movd 8" #dst ", %%mm4				\n\t" /*0001000*/\
		"psrlq $24, %%mm3				\n\t"\
		"psllq $56, %%mm4				\n\t"\
		"por %%mm3, %%mm1				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		"movq %%mm1, %%mm5				\n\t"\
		PAVGB(%%mm2, %%mm1)\
		"movq " #src "(%%eax), %%mm0		\n\t"\
		PAVGB(%%mm1, %%mm0)\
		"psllq $8, %%mm5				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm5				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		"movq %%mm5, %%mm1				\n\t"\
		PAVGB(%%mm2, %%mm5)\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm1				\n\t"\
		"por %%mm4, %%mm2				\n\t"\
		PAVGB(%%mm2, %%mm1)\
		PAVGB(%%mm1, %%mm5)\
		PAVGB(%%mm5, %%mm0)\
		"movd %%mm0, " #dst "				\n\t"\
		"psrlq $32, %%mm0				\n\t"\
		"movd %%mm0, 4" #dst "				\n\t"
/* uses the 9-Tap Filter: 112242211 */
#define NEW_HLP2(i)\
		"movq " #i "(%%eax), %%mm0			\n\t" /*0001000*/\
		"movq %%mm0, %%mm1				\n\t" /*0001000*/\
		"movq %%mm0, %%mm2				\n\t" /*0001000*/\
		"movd -4(%0), %%mm3				\n\t" /*0001000*/\
		"movd 8(%0), %%mm4				\n\t" /*0001000*/\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"psrlq $24, %%mm3				\n\t"\
		"psllq $56, %%mm4				\n\t"\
		"por %%mm3, %%mm1				\n\t" /*0010000*/\
		"por %%mm4, %%mm2				\n\t" /*0000100*/\
		"movq %%mm1, %%mm5				\n\t" /*0010000*/\
		PAVGB(%%mm2, %%mm1)				      /*0010100*/\
		PAVGB(%%mm1, %%mm0)				      /*0012100*/\
		"psllq $8, %%mm5				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm5				\n\t" /*0100000*/\
		"por %%mm4, %%mm2				\n\t" /*0000010*/\
		"movq %%mm5, %%mm1				\n\t" /*0100000*/\
		PAVGB(%%mm2, %%mm5)				      /*0100010*/\
		"psllq $8, %%mm1				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm1				\n\t" /*1000000*/\
		"por %%mm4, %%mm2				\n\t" /*0000001*/\
		"movq %%mm1, %%mm6				\n\t" /*1000000*/\
		PAVGB(%%mm2, %%mm1)				      /*1000001*/\
		"psllq $8, %%mm6				\n\t"\
		"psrlq $8, %%mm2				\n\t"\
		"por %%mm3, %%mm6				\n\t"/*100000000*/\
		"por %%mm4, %%mm2				\n\t"/*000000001*/\
		PAVGB(%%mm2, %%mm6)				     /*100000001*/\
		PAVGB(%%mm6, %%mm1)				     /*110000011*/\
		PAVGB(%%mm1, %%mm5)				     /*112000211*/\
		PAVGB(%%mm5, %%mm0)				     /*112242211*/\
		"movd %%mm0, (%0)				\n\t"\
		"psrlq $32, %%mm0				\n\t"\
		"movd %%mm0, 4(%0)				\n\t"
#define HLP(src, dst) NEW_HLP(src, dst)
		HLP(0, (%0))
		HLP(8, (%%ecx))
		HLP(16, (%%ecx, %1))
		HLP(24, (%%ecx, %1, 2))
		HLP(32, (%0, %1, 4))
		HLP(40, (%%ebx))
		HLP(48, (%%ebx, %1))
		HLP(56, (%%ebx, %1, 2))
		:
		: "r" (dst), "r" (stride)
		: "%eax", "%ebx", "%ecx"
	);
#else
	int y;
	for(y=0; y<BLOCK_SIZE; y++)
	{
		// Use the outside pixels only if they are similar to the edge
		// pixels (difference below QP); otherwise duplicate the edge
		// pixel so real edges are not blurred into the block.
		const int first= ABS(dst[-1] - dst[0]) < QP ? dst[-1] : dst[0];
		const int last= ABS(dst[8] - dst[7]) < QP ? dst[8] : dst[7];
		// sums[k] is the pairwise sum of two neighbouring pixels of the
		// (edge-extended) line; the 9-tap (1,1,2,2,4,2,2,1,1)/16 filter
		// is built from these sums.  sums[] is captured BEFORE any
		// dst[i] is overwritten, so the in-place update below is safe.
		int sums[9];
		sums[0] = first + dst[0];
		sums[1] = dst[0] + dst[1];
		sums[2] = dst[1] + dst[2];
		sums[3] = dst[2] + dst[3];
		sums[4] = dst[3] + dst[4];
		sums[5] = dst[4] + dst[5];
		sums[6] = dst[5] + dst[6];
		sums[7] = dst[6] + dst[7];
		sums[8] = dst[7] + last;
		// each output = (4*center + 2*(inner taps) + outer taps + 8) >> 4
		dst[0]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
		dst[1]= ((dst[1]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
		dst[2]= ((dst[2]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
		dst[3]= ((dst[3]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
		dst[4]= ((dst[4]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
		dst[5]= ((dst[5]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
		dst[6]= (((last + dst[6])<<2) + ((dst[7] + sums[5])<<1) + sums[3] + 8)>>4;
		dst[7]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
		dst+= stride;
	}
#endif
}
/**
 * Dering filter: reduces ringing ("mosquito noise") artifacts around sharp
 * edges inside the 8x8 block at src. Works on a 10x10 neighbourhood.
 * A pixel is replaced by a 3x3 [1 2 1]-weighted average only when it and all
 * of its neighbours lie on the same side of the (min+max)/2 threshold of the
 * block, and the correction is clipped to +-2*QP.
 * The MMX2/3DNow path reads QP from the global pQPb (the QP input operand is
 * unused there) and uses the globals pQPb2, temp0, temp1, b00 and b08.
 */
static inline void dering(uint8_t src[], int stride, int QP)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	asm volatile(
		/* pQPb2 = 2*QP replicated into every byte (clip range of the filter) */
		"movq pQPb, %%mm0 \n\t"
		"paddusb %%mm0, %%mm0 \n\t"
		"movq %%mm0, pQPb2 \n\t"
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		/* mm6 = running byte-wise minimum (starts at 0xFF..),
		   mm7 = running byte-wise maximum (starts at 0) */
		"pcmpeqb %%mm6, %%mm6 \n\t"
		"pxor %%mm7, %%mm7 \n\t"
#ifdef HAVE_MMX2
#define FIND_MIN_MAX(addr)\
		"movq " #addr ", %%mm0 \n\t"\
		"pminub %%mm0, %%mm6 \n\t"\
		"pmaxub %%mm0, %%mm7 \n\t"
#else
/* no pminub/pmaxub before MMX2: emulate them with saturating subtracts */
#define FIND_MIN_MAX(addr)\
		"movq " #addr ", %%mm0 \n\t"\
		"movq %%mm6, %%mm1 \n\t"\
		"psubusb %%mm0, %%mm7 \n\t"\
		"paddb %%mm0, %%mm7 \n\t"\
		"psubusb %%mm0, %%mm1 \n\t"\
		"psubb %%mm1, %%mm6 \n\t"
#endif
		/* scan the 8 inner lines for the per-column min/max */
		FIND_MIN_MAX((%%eax))
		FIND_MIN_MAX((%%eax, %1))
		FIND_MIN_MAX((%%eax, %1, 2))
		FIND_MIN_MAX((%0, %1, 4))
		FIND_MIN_MAX((%%ebx))
		FIND_MIN_MAX((%%ebx, %1))
		FIND_MIN_MAX((%%ebx, %1, 2))
		FIND_MIN_MAX((%0, %1, 8))
		/* horizontal reduction of mm6 to a single minimum byte */
		"movq %%mm6, %%mm4 \n\t"
		"psrlq $8, %%mm6 \n\t"
#ifdef HAVE_MMX2
		"pminub %%mm4, %%mm6 \n\t" // min of pixels
		"pshufw $0xF9, %%mm6, %%mm4 \n\t"
		"pminub %%mm4, %%mm6 \n\t" // min of pixels
		"pshufw $0xFE, %%mm6, %%mm4 \n\t"
		"pminub %%mm4, %%mm6 \n\t"
#else
		"movq %%mm6, %%mm1 \n\t"
		"psubusb %%mm4, %%mm1 \n\t"
		"psubb %%mm1, %%mm6 \n\t"
		"movq %%mm6, %%mm4 \n\t"
		"psrlq $16, %%mm6 \n\t"
		"movq %%mm6, %%mm1 \n\t"
		"psubusb %%mm4, %%mm1 \n\t"
		"psubb %%mm1, %%mm6 \n\t"
		"movq %%mm6, %%mm4 \n\t"
		"psrlq $32, %%mm6 \n\t"
		"movq %%mm6, %%mm1 \n\t"
		"psubusb %%mm4, %%mm1 \n\t"
		"psubb %%mm1, %%mm6 \n\t"
#endif
		/* horizontal reduction of mm7 to a single maximum byte */
		"movq %%mm7, %%mm4 \n\t"
		"psrlq $8, %%mm7 \n\t"
#ifdef HAVE_MMX2
		"pmaxub %%mm4, %%mm7 \n\t" // max of pixels
		"pshufw $0xF9, %%mm7, %%mm4 \n\t"
		"pmaxub %%mm4, %%mm7 \n\t"
		"pshufw $0xFE, %%mm7, %%mm4 \n\t"
		"pmaxub %%mm4, %%mm7 \n\t"
#else
		"psubusb %%mm4, %%mm7 \n\t"
		"paddb %%mm4, %%mm7 \n\t"
		"movq %%mm7, %%mm4 \n\t"
		"psrlq $16, %%mm7 \n\t"
		"psubusb %%mm4, %%mm7 \n\t"
		"paddb %%mm4, %%mm7 \n\t"
		"movq %%mm7, %%mm4 \n\t"
		"psrlq $32, %%mm7 \n\t"
		"psubusb %%mm4, %%mm7 \n\t"
		"paddb %%mm4, %%mm7 \n\t"
#endif
		PAVGB(%%mm6, %%mm7) // a=(max + min)/2
		/* broadcast threshold 'a' to all 8 bytes and park it in temp0 */
		"punpcklbw %%mm7, %%mm7 \n\t"
		"punpcklbw %%mm7, %%mm7 \n\t"
		"punpcklbw %%mm7, %%mm7 \n\t"
		"movq %%mm7, temp0 \n\t"
		/* prime the pipeline with line 0 (L10 = src[0..7], L00/L20 = its
		   left/right shifted-by-one-pixel versions) */
		"movq (%0), %%mm0 \n\t" // L10
		"movq %%mm0, %%mm1 \n\t" // L10
		"movq %%mm0, %%mm2 \n\t" // L10
		"psllq $8, %%mm1 \n\t"
		"psrlq $8, %%mm2 \n\t"
		"movd -4(%0), %%mm3 \n\t"
		"movd 8(%0), %%mm4 \n\t"
		"psrlq $24, %%mm3 \n\t"
		"psllq $56, %%mm4 \n\t"
		"por %%mm3, %%mm1 \n\t" // L00
		"por %%mm4, %%mm2 \n\t" // L20
		"movq %%mm1, %%mm3 \n\t" // L00
		PAVGB(%%mm2, %%mm1) // (L20 + L00)/2
		PAVGB(%%mm0, %%mm1) // (L20 + L00 + 2L10)/4
		"psubusb %%mm7, %%mm0 \n\t"
		"psubusb %%mm7, %%mm2 \n\t"
		"psubusb %%mm7, %%mm3 \n\t"
		"pcmpeqb b00, %%mm0 \n\t" // L10 > a ? 0 : -1
		"pcmpeqb b00, %%mm2 \n\t" // L20 > a ? 0 : -1
		"pcmpeqb b00, %%mm3 \n\t" // L00 > a ? 0 : -1
		"paddb %%mm2, %%mm0 \n\t"
		"paddb %%mm3, %%mm0 \n\t"
		/* same for line 1 */
		"movq (%%eax), %%mm2 \n\t" // L11
		"movq %%mm2, %%mm3 \n\t" // L11
		"movq %%mm2, %%mm4 \n\t" // L11
		"psllq $8, %%mm3 \n\t"
		"psrlq $8, %%mm4 \n\t"
		"movd -4(%%eax), %%mm5 \n\t"
		"movd 8(%%eax), %%mm6 \n\t"
		"psrlq $24, %%mm5 \n\t"
		"psllq $56, %%mm6 \n\t"
		"por %%mm5, %%mm3 \n\t" // L01
		"por %%mm6, %%mm4 \n\t" // L21
		"movq %%mm3, %%mm5 \n\t" // L01
		PAVGB(%%mm4, %%mm3) // (L21 + L01)/2
		PAVGB(%%mm2, %%mm3) // (L21 + L01 + 2L11)/4
		"psubusb %%mm7, %%mm2 \n\t"
		"psubusb %%mm7, %%mm4 \n\t"
		"psubusb %%mm7, %%mm5 \n\t"
		"pcmpeqb b00, %%mm2 \n\t" // L11 > a ? 0 : -1
		"pcmpeqb b00, %%mm4 \n\t" // L21 > a ? 0 : -1
		"pcmpeqb b00, %%mm5 \n\t" // L01 > a ? 0 : -1
		"paddb %%mm4, %%mm2 \n\t"
		"paddb %%mm5, %%mm2 \n\t"
// 0, 2, 3, 1
/* per-line core: computes the horizontally smoothed line, blends it
   vertically with the previous two lines, clips to dst +- 2QP and stores
   it only where all 9 neighbours are on the same side of the threshold
   (sum of the nine 0/-1 compare results == -9, tested via "pand b08") */
#define DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
		"movq " #src ", " #sx " \n\t" /* src[0] */\
		"movq " #sx ", " #lx " \n\t" /* src[0] */\
		"movq " #sx ", " #t0 " \n\t" /* src[0] */\
		"psllq $8, " #lx " \n\t"\
		"psrlq $8, " #t0 " \n\t"\
		"movd -4" #src ", " #t1 " \n\t"\
		"psrlq $24, " #t1 " \n\t"\
		"por " #t1 ", " #lx " \n\t" /* src[-1] */\
		"movd 8" #src ", " #t1 " \n\t"\
		"psllq $56, " #t1 " \n\t"\
		"por " #t1 ", " #t0 " \n\t" /* src[+1] */\
		"movq " #lx ", " #t1 " \n\t" /* src[-1] */\
		PAVGB(t0, lx) /* (src[-1] + src[+1])/2 */\
		PAVGB(sx, lx) /* (src[-1] + 2src[0] + src[+1])/4 */\
		PAVGB(lx, pplx) \
		"movq " #lx ", temp1 \n\t"\
		"movq temp0, " #lx " \n\t"\
		"psubusb " #lx ", " #t1 " \n\t"\
		"psubusb " #lx ", " #t0 " \n\t"\
		"psubusb " #lx ", " #sx " \n\t"\
		"movq b00, " #lx " \n\t"\
		"pcmpeqb " #lx ", " #t1 " \n\t" /* src[-1] > a ? 0 : -1*/\
		"pcmpeqb " #lx ", " #t0 " \n\t" /* src[+1] > a ? 0 : -1*/\
		"pcmpeqb " #lx ", " #sx " \n\t" /* src[0] > a ? 0 : -1*/\
		"paddb " #t1 ", " #t0 " \n\t"\
		"paddb " #t0 ", " #sx " \n\t"\
\
		PAVGB(plx, pplx) /* filtered */\
		"movq " #dst ", " #t0 " \n\t" /* dst */\
		"movq " #t0 ", " #t1 " \n\t" /* dst */\
		"psubusb pQPb2, " #t0 " \n\t"\
		"paddusb pQPb2, " #t1 " \n\t"\
		PMAXUB(t0, pplx)\
		PMINUB(t1, pplx, t0)\
		"paddb " #sx ", " #ppsx " \n\t"\
		"paddb " #psx ", " #ppsx " \n\t"\
		"#paddb b02, " #ppsx " \n\t"\
		"pand b08, " #ppsx " \n\t"\
		"pcmpeqb " #lx ", " #ppsx " \n\t"\
		"pand " #ppsx ", " #pplx " \n\t"\
		"pandn " #dst ", " #ppsx " \n\t"\
		"por " #pplx ", " #ppsx " \n\t"\
		"movq " #ppsx ", " #dst " \n\t"\
		"movq temp1, " #lx " \n\t"
/*
0000000
1111111
1111110
1111101
1111100
1111011
1111010
1111001
1111000
1110111
*/
//DERING_CORE(dst,src ,ppsx ,psx ,sx ,pplx ,plx ,lx ,t0 ,t1)
/* registers rotate through the ppsx/psx/sx roles from one line to the next */
DERING_CORE((%%eax),(%%eax, %1) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%eax, %1),(%%eax, %1, 2) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%eax, %1, 2),(%0, %1, 4) ,%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%0, %1, 4),(%%ebx) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%%ebx),(%%ebx, %1) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
DERING_CORE((%%ebx, %1), (%%ebx, %1, 2),%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
DERING_CORE((%%ebx, %1, 2),(%0, %1, 8) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
DERING_CORE((%0, %1, 8),(%%ebx, %1, 4) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
		: : "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
	int y;
	int min=255;
	int max=0;
	int avg;
	uint8_t *p;
	int s[10];

	/* min/max of the inner 8x8 pixels (columns/lines 1..8) */
	for(y=1; y<9; y++)
	{
		int x;
		p= src + stride*y;
		for(x=1; x<9; x++)
		{
			p++;
			if(*p > max) max= *p;
			if(*p < min) min= *p;
		}
	}
	avg= (min + max + 1)/2;

	/* per-line masks: low 16 bits = "above avg", high 16 bits = "below avg";
	   a bit survives only if both horizontal neighbours are on the same side */
	for(y=0; y<10; y++)
	{
		int x;
		int t = 0;
		p= src + stride*y;
		for(x=0; x<10; x++)
		{
			if(*p > avg) t |= (1<<x);
			p++;
		}
		t |= (~t)<<16;
		t &= (t<<1) & (t>>1);
		s[y] = t;
	}

	for(y=1; y<9; y++)
	{
		int x;
		/* pixel qualifies only if the whole 3x3 neighbourhood agrees */
		int t = s[y-1] & s[y] & s[y+1];
		t|= t>>16;
		p= src + stride*y;
		for(x=1; x<9; x++)
		{
			p++;
			if(t & (1<<x))
			{
				/* 3x3 [1 2 1] smoothing, clipped to the pixel +- 2*QP */
				int f= (*(p-stride-1)) + 2*(*(p-stride)) + (*(p-stride+1))
					+2*(*(p -1)) + 4*(*p ) + 2*(*(p +1))
					+(*(p+stride-1)) + 2*(*(p+stride)) + (*(p+stride+1));
				f= (f + 8)>>4;
				if (*p + 2*QP < f) *p= *p + 2*QP;
				else if(*p - 2*QP > f) *p= *p - 2*QP;
				else *p=f;
			}
		}
	}
#endif
}
/**
 * Deinterlaces the given block
 * will be called for every 8x8 block and can read & write from line 4-15
 * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
 * lines 4-12 will be read into the deblocking filter and should be deinterlaced
 */
/**
 * Linear-interpolation deinterlacer: each odd line (relative to src+4*stride:
 * lines 1, 3, 5, 7) is replaced by the average of the two even lines
 * surrounding it. The MMX2/3DNow path does the same 8 pixels at a time with
 * PAVGB (which rounds up, vs. the truncating >>1 of the C path).
 */
static inline void deInterlaceInterpolateLinear(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	src+= 4*stride;
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		"movq (%0), %%mm0 \n\t"
		"movq (%%eax, %1), %%mm1 \n\t"
		PAVGB(%%mm1, %%mm0)
		"movq %%mm0, (%%eax) \n\t"
		"movq (%0, %1, 4), %%mm0 \n\t"
		PAVGB(%%mm0, %%mm1)
		"movq %%mm1, (%%eax, %1, 2) \n\t"
		"movq (%%ebx, %1), %%mm1 \n\t"
		PAVGB(%%mm1, %%mm0)
		"movq %%mm0, (%%ebx) \n\t"
		"movq (%0, %1, 8), %%mm0 \n\t"
		PAVGB(%%mm0, %%mm1)
		"movq %%mm1, (%%ebx, %1, 2) \n\t"
		: : "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	int x;
	src+= 4*stride;
	for(x=0; x<8; x++)
	{
		src[stride] = (src[0] + src[stride*2])>>1;
		src[stride*3] = (src[stride*2] + src[stride*4])>>1;
		src[stride*5] = (src[stride*4] + src[stride*6])>>1;
		src[stride*7] = (src[stride*6] + src[stride*8])>>1;
		src++;
	}
#endif
}
/**
 * Deinterlaces the given block
 * will be called for every 8x8 block and can read & write from line 4-15
 * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
 * lines 4-12 will be read into the deblocking filter and should be deinterlaced
 * this filter will read lines 3-15 and write 7-13
 * no clipping in C version
 */
  1990. static inline void deInterlaceInterpolateCubic(uint8_t src[], int stride)
  1991. {
  1992. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1993. src+= stride*3;
  1994. asm volatile(
  1995. "leal (%0, %1), %%eax \n\t"
  1996. "leal (%%eax, %1, 4), %%ebx \n\t"
  1997. "leal (%%ebx, %1, 4), %%ecx \n\t"
  1998. "addl %1, %%ecx \n\t"
  1999. "pxor %%mm7, %%mm7 \n\t"
  2000. // 0 1 2 3 4 5 6 7 8 9 10
  2001. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1 ecx
  2002. #define DEINT_CUBIC(a,b,c,d,e)\
  2003. "movq " #a ", %%mm0 \n\t"\
  2004. "movq " #b ", %%mm1 \n\t"\
  2005. "movq " #d ", %%mm2 \n\t"\
  2006. "movq " #e ", %%mm3 \n\t"\
  2007. PAVGB(%%mm2, %%mm1) /* (b+d) /2 */\
  2008. PAVGB(%%mm3, %%mm0) /* a(a+e) /2 */\
  2009. "movq %%mm0, %%mm2 \n\t"\
  2010. "punpcklbw %%mm7, %%mm0 \n\t"\
  2011. "punpckhbw %%mm7, %%mm2 \n\t"\
  2012. "movq %%mm1, %%mm3 \n\t"\
  2013. "punpcklbw %%mm7, %%mm1 \n\t"\
  2014. "punpckhbw %%mm7, %%mm3 \n\t"\
  2015. "psubw %%mm1, %%mm0 \n\t" /* L(a+e - (b+d))/2 */\
  2016. "psubw %%mm3, %%mm2 \n\t" /* H(a+e - (b+d))/2 */\
  2017. "psraw $3, %%mm0 \n\t" /* L(a+e - (b+d))/16 */\
  2018. "psraw $3, %%mm2 \n\t" /* H(a+e - (b+d))/16 */\
  2019. "psubw %%mm0, %%mm1 \n\t" /* L(9b + 9d - a - e)/16 */\
  2020. "psubw %%mm2, %%mm3 \n\t" /* H(9b + 9d - a - e)/16 */\
  2021. "packuswb %%mm3, %%mm1 \n\t"\
  2022. "movq %%mm1, " #c " \n\t"
  2023. DEINT_CUBIC((%0), (%%eax, %1), (%%eax, %1, 2), (%0, %1, 4), (%%ebx, %1))
  2024. DEINT_CUBIC((%%eax, %1), (%0, %1, 4), (%%ebx), (%%ebx, %1), (%0, %1, 8))
  2025. DEINT_CUBIC((%0, %1, 4), (%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8), (%%ecx))
  2026. DEINT_CUBIC((%%ebx, %1), (%0, %1, 8), (%%ebx, %1, 4), (%%ecx), (%%ecx, %1, 2))
  2027. : : "r" (src), "r" (stride)
  2028. : "%eax", "%ebx", "ecx"
  2029. );
  2030. #else
  2031. int x;
  2032. src+= stride*3;
  2033. for(x=0; x<8; x++)
  2034. {
  2035. src[stride*3] = (-src[0] + 9*src[stride*2] + 9*src[stride*4] - src[stride*6])>>4;
  2036. src[stride*5] = (-src[stride*2] + 9*src[stride*4] + 9*src[stride*6] - src[stride*8])>>4;
  2037. src[stride*7] = (-src[stride*4] + 9*src[stride*6] + 9*src[stride*8] - src[stride*10])>>4;
  2038. src[stride*9] = (-src[stride*6] + 9*src[stride*8] + 9*src[stride*10] - src[stride*12])>>4;
  2039. src++;
  2040. }
  2041. #endif
  2042. }
/**
 * Deinterlaces the given block
 * will be called for every 8x8 block and can read & write from line 4-15
 * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
 * lines 4-12 will be read into the deblocking filter and should be deinterlaced
 * will shift the image up by 1 line (FIXME if this is a problem)
 * this filter will read lines 4-13 and write 4-11
 */
/**
 * Linear-blend deinterlacer: every output line n becomes a vertical
 * [1 2 1]/4 blend of lines n, n+1, n+2 (so the image is effectively shifted
 * up by one line). The MMX path builds the same weighting out of two PAVGB
 * steps; PAVGB rounds up, so results can differ from the C path by 1.
 */
static inline void deInterlaceBlendLinear(uint8_t src[], int stride)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
	src+= 4*stride;
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		"movq (%0), %%mm0 \n\t" // L0
		"movq (%%eax, %1), %%mm1 \n\t" // L2
		PAVGB(%%mm1, %%mm0) // L0+L2
		"movq (%%eax), %%mm2 \n\t" // L1
		PAVGB(%%mm2, %%mm0)
		"movq %%mm0, (%0) \n\t"
		"movq (%%eax, %1, 2), %%mm0 \n\t" // L3
		PAVGB(%%mm0, %%mm2) // L1+L3
		PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
		"movq %%mm2, (%%eax) \n\t"
		"movq (%0, %1, 4), %%mm2 \n\t" // L4
		PAVGB(%%mm2, %%mm1) // L2+L4
		PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
		"movq %%mm1, (%%eax, %1) \n\t"
		"movq (%%ebx), %%mm1 \n\t" // L5
		PAVGB(%%mm1, %%mm0) // L3+L5
		PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
		"movq %%mm0, (%%eax, %1, 2) \n\t"
		"movq (%%ebx, %1), %%mm0 \n\t" // L6
		PAVGB(%%mm0, %%mm2) // L4+L6
		PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
		"movq %%mm2, (%0, %1, 4) \n\t"
		"movq (%%ebx, %1, 2), %%mm2 \n\t" // L7
		PAVGB(%%mm2, %%mm1) // L5+L7
		PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
		"movq %%mm1, (%%ebx) \n\t"
		"movq (%0, %1, 8), %%mm1 \n\t" // L8
		PAVGB(%%mm1, %%mm0) // L6+L8
		PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8
		"movq %%mm0, (%%ebx, %1) \n\t"
		"movq (%%ebx, %1, 4), %%mm0 \n\t" // L9
		PAVGB(%%mm0, %%mm2) // L7+L9
		PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9
		"movq %%mm2, (%%ebx, %1, 2) \n\t"
		: : "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else
	int x;
	src+= 4*stride;
	for(x=0; x<8; x++)
	{
		src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
		src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
		src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
		src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
		src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
		src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
		src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
		src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
		src++;
	}
#endif
}
/**
 * Deinterlaces the given block
 * will be called for every 8x8 block and can read & write from line 4-15,
 * lines 0-3 have been passed through the deblock / dering filters already, but can be read too
 * lines 4-12 will be read into the deblocking filter and should be deinterlaced
 */
/**
 * Median deinterlacer: each odd line is replaced by the byte-wise median of
 * itself and its two neighbouring even lines.
 * MMX2 computes the median with pmaxub/pminub; plain MMX emulates it with
 * saturating-subtract comparisons. The C fallback is NOT a median — it is the
 * same linear blend as deInterlaceBlendLinear (see the FIXME below).
 */
static inline void deInterlaceMedian(uint8_t src[], int stride)
{
#ifdef HAVE_MMX
	src+= 4*stride;
#ifdef HAVE_MMX2
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		/* median(a,b,c) = min( max(min(a,b), c), max(a,b) ) */
		"movq (%0), %%mm0 \n\t" //
		"movq (%%eax, %1), %%mm2 \n\t" //
		"movq (%%eax), %%mm1 \n\t" //
		"movq %%mm0, %%mm3 \n\t"
		"pmaxub %%mm1, %%mm0 \n\t" //
		"pminub %%mm3, %%mm1 \n\t" //
		"pmaxub %%mm2, %%mm1 \n\t" //
		"pminub %%mm1, %%mm0 \n\t"
		"movq %%mm0, (%%eax) \n\t"
		"movq (%0, %1, 4), %%mm0 \n\t" //
		"movq (%%eax, %1, 2), %%mm1 \n\t" //
		"movq %%mm2, %%mm3 \n\t"
		"pmaxub %%mm1, %%mm2 \n\t" //
		"pminub %%mm3, %%mm1 \n\t" //
		"pmaxub %%mm0, %%mm1 \n\t" //
		"pminub %%mm1, %%mm2 \n\t"
		"movq %%mm2, (%%eax, %1, 2) \n\t"
		"movq (%%ebx), %%mm2 \n\t" //
		"movq (%%ebx, %1), %%mm1 \n\t" //
		"movq %%mm2, %%mm3 \n\t"
		"pmaxub %%mm0, %%mm2 \n\t" //
		"pminub %%mm3, %%mm0 \n\t" //
		"pmaxub %%mm1, %%mm0 \n\t" //
		"pminub %%mm0, %%mm2 \n\t"
		"movq %%mm2, (%%ebx) \n\t"
		"movq (%%ebx, %1, 2), %%mm2 \n\t" //
		"movq (%0, %1, 8), %%mm0 \n\t" //
		"movq %%mm2, %%mm3 \n\t"
		"pmaxub %%mm0, %%mm2 \n\t" //
		"pminub %%mm3, %%mm0 \n\t" //
		"pmaxub %%mm1, %%mm0 \n\t" //
		"pminub %%mm0, %%mm2 \n\t"
		"movq %%mm2, (%%ebx, %1, 2) \n\t"
		: : "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#else // MMX without MMX2
	asm volatile(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		"pxor %%mm7, %%mm7 \n\t"
/* median via "x <= y" masks: (a<=c), (c<=b), (b<=a) computed with psubusb,
   then each input is ORed with the XOR-combined masks and ANDed together */
#define MEDIAN(a,b,c)\
		"movq " #a ", %%mm0 \n\t"\
		"movq " #b ", %%mm2 \n\t"\
		"movq " #c ", %%mm1 \n\t"\
		"movq %%mm0, %%mm3 \n\t"\
		"movq %%mm1, %%mm4 \n\t"\
		"movq %%mm2, %%mm5 \n\t"\
		"psubusb %%mm1, %%mm3 \n\t"\
		"psubusb %%mm2, %%mm4 \n\t"\
		"psubusb %%mm0, %%mm5 \n\t"\
		"pcmpeqb %%mm7, %%mm3 \n\t"\
		"pcmpeqb %%mm7, %%mm4 \n\t"\
		"pcmpeqb %%mm7, %%mm5 \n\t"\
		"movq %%mm3, %%mm6 \n\t"\
		"pxor %%mm4, %%mm3 \n\t"\
		"pxor %%mm5, %%mm4 \n\t"\
		"pxor %%mm6, %%mm5 \n\t"\
		"por %%mm3, %%mm1 \n\t"\
		"por %%mm4, %%mm2 \n\t"\
		"por %%mm5, %%mm0 \n\t"\
		"pand %%mm2, %%mm0 \n\t"\
		"pand %%mm1, %%mm0 \n\t"\
		"movq %%mm0, " #b " \n\t"
MEDIAN((%0), (%%eax), (%%eax, %1))
MEDIAN((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4))
MEDIAN((%0, %1, 4), (%%ebx), (%%ebx, %1))
MEDIAN((%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8))
		: : "r" (src), "r" (stride)
		: "%eax", "%ebx"
	);
#endif // MMX
#else
	//FIXME
	/* NOTE(review): this fallback is a [1 2 1] blend, not a median filter */
	int x;
	src+= 4*stride;
	for(x=0; x<8; x++)
	{
		src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
		src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
		src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
		src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
		src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
		src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
		src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
		src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
		src++;
	}
#endif
}
  2222. #ifdef HAVE_MMX
  2223. /**
  2224. * transposes and shift the given 8x8 Block into dst1 and dst2
  2225. */
/**
 * Transposes the 8x8 block at src (line stride srcStride) and shifts it into
 * the temp buffers dst1 and dst2 using the classic punpcklbw/punpckhbw +
 * punpcklwd/punpckhwd byte-interleave scheme (4 lines per pass, 2 passes).
 * Output columns are stored as dwords; the byte offsets used (dst1: 128..196
 * step 16+4, dst2: 48..116) imply a 16-byte line stride in the temp buffers,
 * with dst2 receiving the same data shifted by several lines —
 * NOTE(review): exact buffer layout depends on the caller, verify there.
 */
static inline void transpose1(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride)
{
	asm(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		/* first pass: lines 0-3, left 4 columns of the transpose */
		"movq (%0), %%mm0 \n\t" // 12345678
		"movq (%%eax), %%mm1 \n\t" // abcdefgh
		"movq %%mm0, %%mm2 \n\t" // 12345678
		"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
		"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
		"movq (%%eax, %1), %%mm1 \n\t"
		"movq (%%eax, %1, 2), %%mm3 \n\t"
		"movq %%mm1, %%mm4 \n\t"
		"punpcklbw %%mm3, %%mm1 \n\t"
		"punpckhbw %%mm3, %%mm4 \n\t"
		"movq %%mm0, %%mm3 \n\t"
		"punpcklwd %%mm1, %%mm0 \n\t"
		"punpckhwd %%mm1, %%mm3 \n\t"
		"movq %%mm2, %%mm1 \n\t"
		"punpcklwd %%mm4, %%mm2 \n\t"
		"punpckhwd %%mm4, %%mm1 \n\t"
		"movd %%mm0, 128(%2) \n\t"
		"psrlq $32, %%mm0 \n\t"
		"movd %%mm0, 144(%2) \n\t"
		"movd %%mm3, 160(%2) \n\t"
		"psrlq $32, %%mm3 \n\t"
		"movd %%mm3, 176(%2) \n\t"
		"movd %%mm3, 48(%3) \n\t"
		"movd %%mm2, 192(%2) \n\t"
		"movd %%mm2, 64(%3) \n\t"
		"psrlq $32, %%mm2 \n\t"
		"movd %%mm2, 80(%3) \n\t"
		"movd %%mm1, 96(%3) \n\t"
		"psrlq $32, %%mm1 \n\t"
		"movd %%mm1, 112(%3) \n\t"
		/* second pass: lines 4-7, right 4 columns (dword offset +4) */
		"movq (%0, %1, 4), %%mm0 \n\t" // 12345678
		"movq (%%ebx), %%mm1 \n\t" // abcdefgh
		"movq %%mm0, %%mm2 \n\t" // 12345678
		"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
		"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
		"movq (%%ebx, %1), %%mm1 \n\t"
		"movq (%%ebx, %1, 2), %%mm3 \n\t"
		"movq %%mm1, %%mm4 \n\t"
		"punpcklbw %%mm3, %%mm1 \n\t"
		"punpckhbw %%mm3, %%mm4 \n\t"
		"movq %%mm0, %%mm3 \n\t"
		"punpcklwd %%mm1, %%mm0 \n\t"
		"punpckhwd %%mm1, %%mm3 \n\t"
		"movq %%mm2, %%mm1 \n\t"
		"punpcklwd %%mm4, %%mm2 \n\t"
		"punpckhwd %%mm4, %%mm1 \n\t"
		"movd %%mm0, 132(%2) \n\t"
		"psrlq $32, %%mm0 \n\t"
		"movd %%mm0, 148(%2) \n\t"
		"movd %%mm3, 164(%2) \n\t"
		"psrlq $32, %%mm3 \n\t"
		"movd %%mm3, 180(%2) \n\t"
		"movd %%mm3, 52(%3) \n\t"
		"movd %%mm2, 196(%2) \n\t"
		"movd %%mm2, 68(%3) \n\t"
		"psrlq $32, %%mm2 \n\t"
		"movd %%mm2, 84(%3) \n\t"
		"movd %%mm1, 100(%3) \n\t"
		"psrlq $32, %%mm1 \n\t"
		"movd %%mm1, 116(%3) \n\t"
		:: "r" (src), "r" (srcStride), "r" (dst1), "r" (dst2)
		: "%eax", "%ebx"
	);
}
  2297. /**
  2298. * transposes the given 8x8 block
  2299. */
/**
 * Transposes an 8x8 block stored in the temp buffer src (16-byte line
 * stride, as used by transpose1) back into dst with line stride dstStride.
 * Same interleave scheme as transpose1, run in the opposite direction:
 * two passes of 4 source lines each, writing 4-byte column dwords.
 */
static inline void transpose2(uint8_t *dst, int dstStride, uint8_t *src)
{
	asm(
		"leal (%0, %1), %%eax \n\t"
		"leal (%%eax, %1, 4), %%ebx \n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		/* first pass: temp lines 0-3 -> left dwords of dst lines 0-7 */
		"movq (%2), %%mm0 \n\t" // 12345678
		"movq 16(%2), %%mm1 \n\t" // abcdefgh
		"movq %%mm0, %%mm2 \n\t" // 12345678
		"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
		"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
		"movq 32(%2), %%mm1 \n\t"
		"movq 48(%2), %%mm3 \n\t"
		"movq %%mm1, %%mm4 \n\t"
		"punpcklbw %%mm3, %%mm1 \n\t"
		"punpckhbw %%mm3, %%mm4 \n\t"
		"movq %%mm0, %%mm3 \n\t"
		"punpcklwd %%mm1, %%mm0 \n\t"
		"punpckhwd %%mm1, %%mm3 \n\t"
		"movq %%mm2, %%mm1 \n\t"
		"punpcklwd %%mm4, %%mm2 \n\t"
		"punpckhwd %%mm4, %%mm1 \n\t"
		"movd %%mm0, (%0) \n\t"
		"psrlq $32, %%mm0 \n\t"
		"movd %%mm0, (%%eax) \n\t"
		"movd %%mm3, (%%eax, %1) \n\t"
		"psrlq $32, %%mm3 \n\t"
		"movd %%mm3, (%%eax, %1, 2) \n\t"
		"movd %%mm2, (%0, %1, 4) \n\t"
		"psrlq $32, %%mm2 \n\t"
		"movd %%mm2, (%%ebx) \n\t"
		"movd %%mm1, (%%ebx, %1) \n\t"
		"psrlq $32, %%mm1 \n\t"
		"movd %%mm1, (%%ebx, %1, 2) \n\t"
		/* second pass: temp lines 4-7 -> right dwords (offset +4) */
		"movq 64(%2), %%mm0 \n\t" // 12345678
		"movq 80(%2), %%mm1 \n\t" // abcdefgh
		"movq %%mm0, %%mm2 \n\t" // 12345678
		"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
		"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
		"movq 96(%2), %%mm1 \n\t"
		"movq 112(%2), %%mm3 \n\t"
		"movq %%mm1, %%mm4 \n\t"
		"punpcklbw %%mm3, %%mm1 \n\t"
		"punpckhbw %%mm3, %%mm4 \n\t"
		"movq %%mm0, %%mm3 \n\t"
		"punpcklwd %%mm1, %%mm0 \n\t"
		"punpckhwd %%mm1, %%mm3 \n\t"
		"movq %%mm2, %%mm1 \n\t"
		"punpcklwd %%mm4, %%mm2 \n\t"
		"punpckhwd %%mm4, %%mm1 \n\t"
		"movd %%mm0, 4(%0) \n\t"
		"psrlq $32, %%mm0 \n\t"
		"movd %%mm0, 4(%%eax) \n\t"
		"movd %%mm3, 4(%%eax, %1) \n\t"
		"psrlq $32, %%mm3 \n\t"
		"movd %%mm3, 4(%%eax, %1, 2) \n\t"
		"movd %%mm2, 4(%0, %1, 4) \n\t"
		"psrlq $32, %%mm2 \n\t"
		"movd %%mm2, 4(%%ebx) \n\t"
		"movd %%mm1, 4(%%ebx, %1) \n\t"
		"psrlq $32, %%mm1 \n\t"
		"movd %%mm1, 4(%%ebx, %1, 2) \n\t"
		:: "r" (dst), "r" (dstStride), "r" (src)
		: "%eax", "%ebx"
	);
}
  2367. #endif
  2368. #ifdef HAVE_ODIVX_POSTPROCESS
  2369. #include "../opendivx/postprocess.h"
  2370. int use_old_pp=0;
  2371. #endif
  2372. static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
  2373. QP_STORE_T QPs[], int QPStride, int isColor, int mode);
  2374. /* -pp Command line Help
  2375. NOTE/FIXME: put this at an appropriate place (--help, html docs, man mplayer)?
  2376. -pp <filterName>[:<option>[:<option>...]][,[-]<filterName>[:<option>...]]...
  2377. long form example:
  2378. -pp vdeblock:autoq,hdeblock:autoq,linblenddeint -pp default,-vdeblock
  2379. short form example:
  2380. -pp vb:a,hb:a,lb -pp de,-vb
  2381. Filters Options
  2382. short long name short long option Description
*	*	a	autoq	cpu power dependent enabler
		c	chrom	chrominance filtering enabled
		y	nochrom	chrominance filtering disabled
  2386. hb hdeblock horizontal deblocking filter
  2387. vb vdeblock vertical deblocking filter
  2388. vr rkvdeblock
  2389. h1 x1hdeblock Experimental horizontal deblock filter 1
  2390. v1 x1vdeblock Experimental vertical deblock filter 1
  2391. dr dering not implemented yet
  2392. al autolevels automatic brightness / contrast fixer
  2393. f fullyrange stretch luminance range to (0..255)
  2394. lb linblenddeint linear blend deinterlacer
  2395. li linipoldeint linear interpolating deinterlacer
  2396. ci cubicipoldeint cubic interpolating deinterlacer
  2397. md mediandeint median deinterlacer
  2398. de default hdeblock:a,vdeblock:a,dering:a,autolevels
  2399. fa fast x1hdeblock:a,x1vdeblock:a,dering:a,autolevels
  2400. */
/**
 * returns a PPMode struct which will have a non 0 error variable if an error occurred
 * name is the string after "-pp" on the command line
 * quality is a number from 0 to GET_PP_QUALITY_MAX
 */
  2406. struct PPMode getPPModeByNameAndQuality(char *name, int quality)
  2407. {
  2408. char temp[GET_MODE_BUFFER_SIZE];
  2409. char *p= temp;
  2410. char *filterDelimiters= ",";
  2411. char *optionDelimiters= ":";
  2412. struct PPMode ppMode= {0,0,0,0,0,0};
  2413. char *filterToken;
  2414. strncpy(temp, name, GET_MODE_BUFFER_SIZE);
  2415. for(;;){
  2416. char *filterName;
  2417. int q= GET_PP_QUALITY_MAX;
  2418. int chrom=-1;
  2419. char *option;
  2420. char *options[OPTIONS_ARRAY_SIZE];
  2421. int i;
  2422. int filterNameOk=0;
  2423. int numOfUnknownOptions=0;
  2424. int enable=1; //does the user want us to enabled or disabled the filter
  2425. filterToken= strtok(p, filterDelimiters);
  2426. if(filterToken == NULL) break;
  2427. p+= strlen(filterToken) + 1;
  2428. filterName= strtok(filterToken, optionDelimiters);
  2429. printf("%s::%s\n", filterToken, filterName);
  2430. if(*filterName == '-')
  2431. {
  2432. enable=0;
  2433. filterName++;
  2434. }
  2435. for(;;){ //for all options
  2436. option= strtok(NULL, optionDelimiters);
  2437. if(option == NULL) break;
  2438. printf("%s\n", option);
  2439. if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
  2440. else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
  2441. else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
  2442. else
  2443. {
  2444. options[numOfUnknownOptions] = option;
  2445. numOfUnknownOptions++;
  2446. options[numOfUnknownOptions] = NULL;
  2447. }
  2448. if(numOfUnknownOptions >= OPTIONS_ARRAY_SIZE-1) break;
  2449. }
  2450. /* replace stuff from the replace Table */
  2451. for(i=0; replaceTable[2*i]!=NULL; i++)
  2452. {
  2453. if(!strcmp(replaceTable[2*i], filterName))
  2454. {
  2455. int newlen= strlen(replaceTable[2*i + 1]);
  2456. int plen;
  2457. int spaceLeft;
  2458. if(p==NULL) p= temp, *p=0; //last filter
  2459. else p--, *p=','; //not last filter
  2460. plen= strlen(p);
  2461. spaceLeft= (int)p - (int)temp + plen;
  2462. if(spaceLeft + newlen >= GET_MODE_BUFFER_SIZE)
  2463. {
  2464. ppMode.error++;
  2465. break;
  2466. }
  2467. memmove(p + newlen, p, plen+1);
  2468. memcpy(p, replaceTable[2*i + 1], newlen);
  2469. filterNameOk=1;
  2470. }
  2471. }
  2472. for(i=0; filters[i].shortName!=NULL; i++)
  2473. {
  2474. if( !strcmp(filters[i].longName, filterName)
  2475. || !strcmp(filters[i].shortName, filterName))
  2476. {
  2477. ppMode.lumMode &= ~filters[i].mask;
  2478. ppMode.chromMode &= ~filters[i].mask;
  2479. filterNameOk=1;
  2480. if(!enable) break; // user wants to disable it
  2481. if(q >= filters[i].minLumQuality)
  2482. ppMode.lumMode|= filters[i].mask;
  2483. if(chrom==1 || (chrom==-1 && filters[i].chromDefault))
  2484. if(q >= filters[i].minChromQuality)
  2485. ppMode.chromMode|= filters[i].mask;
  2486. if(filters[i].mask == LEVEL_FIX)
  2487. {
  2488. int o;
  2489. ppMode.minAllowedY= 16;
  2490. ppMode.maxAllowedY= 234;
  2491. for(o=0; options[o]!=NULL; o++)
  2492. if( !strcmp(options[o],"fullyrange")
  2493. ||!strcmp(options[o],"f"))
  2494. {
  2495. ppMode.minAllowedY= 0;
  2496. ppMode.maxAllowedY= 255;
  2497. numOfUnknownOptions--;
  2498. }
  2499. }
  2500. }
  2501. }
  2502. if(!filterNameOk) ppMode.error++;
  2503. ppMode.error += numOfUnknownOptions;
  2504. }
  2505. if(ppMode.lumMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_H;
  2506. if(ppMode.lumMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_V;
  2507. if(ppMode.chromMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_H;
  2508. if(ppMode.chromMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_V;
  2509. if(ppMode.lumMode & DERING) ppMode.oldMode |= PP_DERING_Y;
  2510. if(ppMode.chromMode & DERING) ppMode.oldMode |= PP_DERING_C;
  2511. return ppMode;
  2512. }
  2513. /**
  2514. * ...
  2515. */
  2516. void postprocess(unsigned char * src[], int src_stride,
  2517. unsigned char * dst[], int dst_stride,
  2518. int horizontal_size, int vertical_size,
  2519. QP_STORE_T *QP_store, int QP_stride,
  2520. int mode)
  2521. {
  2522. /*
  2523. static int qual=0;
  2524. struct PPMode ppMode= getPPModeByNameAndQuality("fast,default,-hdeblock,-vdeblock", qual);
  2525. qual++;
  2526. qual%=7;
  2527. printf("\n%d %d %d %d\n", ppMode.lumMode, ppMode.chromMode, ppMode.oldMode, ppMode.error);
  2528. postprocess2(src, src_stride, dst, dst_stride,
  2529. horizontal_size, vertical_size, QP_store, QP_stride, &ppMode);
  2530. return;
  2531. */
  2532. #ifdef HAVE_ODIVX_POSTPROCESS
  2533. // Note: I could make this shit outside of this file, but it would mean one
  2534. // more function call...
  2535. if(use_old_pp){
  2536. odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,mode);
  2537. return;
  2538. }
  2539. #endif
  2540. postProcess(src[0], src_stride, dst[0], dst_stride,
  2541. horizontal_size, vertical_size, QP_store, QP_stride, 0, mode);
  2542. horizontal_size >>= 1;
  2543. vertical_size >>= 1;
  2544. src_stride >>= 1;
  2545. dst_stride >>= 1;
  2546. mode= ((mode&0xFF)>>4) | (mode&0xFFFFFF00);
  2547. // mode&= ~(LINEAR_IPOL_DEINT_FILTER | LINEAR_BLEND_DEINT_FILTER |
  2548. // MEDIAN_DEINT_FILTER | CUBIC_IPOL_DEINT_FILTER);
  2549. if(1)
  2550. {
  2551. postProcess(src[1], src_stride, dst[1], dst_stride,
  2552. horizontal_size, vertical_size, QP_store, QP_stride, 1, mode);
  2553. postProcess(src[2], src_stride, dst[2], dst_stride,
  2554. horizontal_size, vertical_size, QP_store, QP_stride, 2, mode);
  2555. }
  2556. else
  2557. {
  2558. memcpy(dst[1], src[1], src_stride*horizontal_size);
  2559. memcpy(dst[2], src[2], src_stride*horizontal_size);
  2560. }
  2561. }
  2562. void postprocess2(unsigned char * src[], int src_stride,
  2563. unsigned char * dst[], int dst_stride,
  2564. int horizontal_size, int vertical_size,
  2565. QP_STORE_T *QP_store, int QP_stride,
  2566. struct PPMode *mode)
  2567. {
  2568. #ifdef HAVE_ODIVX_POSTPROCESS
  2569. // Note: I could make this shit outside of this file, but it would mean one
  2570. // more function call...
  2571. if(use_old_pp){
  2572. odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,
  2573. mode->oldMode);
  2574. return;
  2575. }
  2576. #endif
  2577. postProcess(src[0], src_stride, dst[0], dst_stride,
  2578. horizontal_size, vertical_size, QP_store, QP_stride, 0, mode->lumMode);
  2579. horizontal_size >>= 1;
  2580. vertical_size >>= 1;
  2581. src_stride >>= 1;
  2582. dst_stride >>= 1;
  2583. postProcess(src[1], src_stride, dst[1], dst_stride,
  2584. horizontal_size, vertical_size, QP_store, QP_stride, 1, mode->chromMode);
  2585. postProcess(src[2], src_stride, dst[2], dst_stride,
  2586. horizontal_size, vertical_size, QP_store, QP_stride, 2, mode->chromMode);
  2587. }
  2588. /**
  2589. * gets the mode flags for a given quality (larger values mean slower but better postprocessing)
  2590. * 0 <= quality <= 6
  2591. */
  2592. int getPpModeForQuality(int quality){
  2593. int modes[1+GET_PP_QUALITY_MAX]= {
  2594. 0,
  2595. #if 1
  2596. // horizontal filters first
  2597. LUM_H_DEBLOCK,
  2598. LUM_H_DEBLOCK | LUM_V_DEBLOCK,
  2599. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2600. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2601. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING,
  2602. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING | CHROM_DERING
  2603. #else
  2604. // vertical filters first
  2605. LUM_V_DEBLOCK,
  2606. LUM_V_DEBLOCK | LUM_H_DEBLOCK,
  2607. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2608. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2609. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING,
  2610. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING | CHROM_DERING
  2611. #endif
  2612. };
  2613. #ifdef HAVE_ODIVX_POSTPROCESS
  2614. int odivx_modes[1+GET_PP_QUALITY_MAX]= {
  2615. 0,
  2616. PP_DEBLOCK_Y_H,
  2617. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V,
  2618. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H,
  2619. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V,
  2620. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y,
  2621. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y|PP_DERING_C
  2622. };
  2623. if(use_old_pp) return odivx_modes[quality];
  2624. #endif
  2625. return modes[quality];
  2626. }
/**
 * Copies a BLOCK_SIZE wide block from src to dst and fixes the blacklevel.
 * numLines must be a multiple of 4
 * levelFix == 0 -> don't touch the brightness & contrast
 *
 * When levelFix is set, the MMX path rescales each byte using the global
 * packedYOffset / packedYScale values ((v - offset) << 6, then pmulhw by the
 * 1024-based scale, i.e. an effective *scale/16 with saturation on repack).
 *
 * NOTE(review): the non-MMX fallback ignores levelFix entirely and does a
 * plain copy — brightness/contrast correction only happens with HAVE_MMX.
 * NOTE(review): the MMX levelFix path copies a fixed 8 lines (4 expansions of
 * SCALED_CPY, 2 lines each) and ignores numLines — callers in this file pass
 * numLines == 8, so this only matters for other callers; verify before reuse.
 * NOTE(review): the asm statements modify mm0-mm6 but only declare eax/ebx as
 * clobbers — presumably safe because the whole file runs between emms calls,
 * but confirm before moving this code.
 */
static inline void blockCopy(uint8_t dst[], int dstStride, uint8_t src[], int srcStride,
	int numLines, int levelFix)
{
#ifndef HAVE_MMX
	int i;
#endif
	if(levelFix)
	{
#ifdef HAVE_MMX
		/* eax = 2*srcStride, ebx = 2*dstStride; mm2 = offset, mm3 = scale,
		   mm4 = 0 (for byte->word unpacking) */
		asm volatile(
			"leal (%2,%2), %%eax \n\t"
			"leal (%3,%3), %%ebx \n\t"
			"movq packedYOffset, %%mm2 \n\t"
			"movq packedYScale, %%mm3 \n\t"
			"pxor %%mm4, %%mm4 \n\t"

/* copies + rescales 2 lines and advances the src pointer by 2*srcStride;
   the dst pointer is advanced between expansions below */
#define SCALED_CPY \
			"movq (%0), %%mm0 \n\t"\
			"movq (%0), %%mm5 \n\t"\
			"punpcklbw %%mm4, %%mm0 \n\t"\
			"punpckhbw %%mm4, %%mm5 \n\t"\
			"psubw %%mm2, %%mm0 \n\t"\
			"psubw %%mm2, %%mm5 \n\t"\
			"movq (%0,%2), %%mm1 \n\t"\
			"psllw $6, %%mm0 \n\t"\
			"psllw $6, %%mm5 \n\t"\
			"pmulhw %%mm3, %%mm0 \n\t"\
			"movq (%0,%2), %%mm6 \n\t"\
			"pmulhw %%mm3, %%mm5 \n\t"\
			"punpcklbw %%mm4, %%mm1 \n\t"\
			"punpckhbw %%mm4, %%mm6 \n\t"\
			"psubw %%mm2, %%mm1 \n\t"\
			"psubw %%mm2, %%mm6 \n\t"\
			"psllw $6, %%mm1 \n\t"\
			"psllw $6, %%mm6 \n\t"\
			"pmulhw %%mm3, %%mm1 \n\t"\
			"pmulhw %%mm3, %%mm6 \n\t"\
			"addl %%eax, %0 \n\t"\
			"packuswb %%mm5, %%mm0 \n\t"\
			"packuswb %%mm6, %%mm1 \n\t"\
			"movq %%mm0, (%1) \n\t"\
			"movq %%mm1, (%1, %3) \n\t"\

SCALED_CPY
			"addl %%ebx, %1 \n\t"
SCALED_CPY
			"addl %%ebx, %1 \n\t"
SCALED_CPY
			"addl %%ebx, %1 \n\t"
SCALED_CPY

			: "+r"(src),
			"+r"(dst)
			:"r" (srcStride),
			"r" (dstStride)
			: "%eax", "%ebx"
		);
#else
		/* NOTE(review): no level fixing here — plain copy despite levelFix */
		for(i=0; i<numLines; i++)
			memcpy( &(dst[dstStride*i]),
				&(src[srcStride*i]), BLOCK_SIZE);
#endif
	}
	else
	{
#ifdef HAVE_MMX
		/* plain copy, 4 lines per loop iteration (numLines>>2 iterations);
		   the loop counter lives in the global temp0.
		   NOTE(review): the packedYOffset/packedYScale loads into mm2/mm3
		   appear unused in this branch — presumably leftovers. */
		asm volatile(
			"movl %4, %%eax \n\t"
			"movl %%eax, temp0\n\t"
			"pushl %0 \n\t"
			"pushl %1 \n\t"
			"leal (%2,%2), %%eax \n\t"
			"leal (%3,%3), %%ebx \n\t"
			"movq packedYOffset, %%mm2 \n\t"
			"movq packedYScale, %%mm3 \n\t"

/* copies 2 lines; pointers are advanced between expansions below */
#define SIMPLE_CPY \
			"movq (%0), %%mm0 \n\t"\
			"movq (%0,%2), %%mm1 \n\t"\
			"movq %%mm0, (%1) \n\t"\
			"movq %%mm1, (%1, %3) \n\t"

			"1: \n\t"
SIMPLE_CPY
			"addl %%eax, %0 \n\t"
			"addl %%ebx, %1 \n\t"
SIMPLE_CPY
			"addl %%eax, %0 \n\t"
			"addl %%ebx, %1 \n\t"
			"decl temp0 \n\t"
			"jnz 1b \n\t"
			"popl %1 \n\t"
			"popl %0 \n\t"
			: : "r" (src),
			"r" (dst),
			"r" (srcStride),
			"r" (dstStride),
			"m" (numLines>>2)
			: "%eax", "%ebx"
		);
#else
		for(i=0; i<numLines; i++)
			memcpy( &(dst[dstStride*i]),
				&(src[srcStride*i]), BLOCK_SIZE);
#endif
	}
}
/**
 * Filters array of bytes (Y or U or V values) — the per-plane driver loop.
 *
 * Walks the plane in 8-pixel-wide block columns: each block is copied from
 * src to dst (blockCopy), optionally deinterlaced, then the vertical and
 * horizontal deblocking / dering filters selected in the mode bitmask are
 * applied in place on dst. For luma (isColor == 0) a brightness histogram is
 * maintained across frames to derive the LEVEL_FIX offset/scale
 * (packedYOffset / packedYScale globals).
 *
 * NOTE(review): the function-local static buffers (tempDst, tempSrc,
 * tempDstBlock, tempSrcBlock, yHistogram) and the static framenum counter
 * make this function non-reentrant / thread-unsafe; the memalign/malloc
 * results are never NULL-checked and never freed.
 */
static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
	QP_STORE_T QPs[], int QPStride, int isColor, int mode)
{
	int x,y;
	/* we need 64bit here otherwise we'll going to have a problem
	   after watching a black picture for 5 hours*/
	static uint64_t *yHistogram= NULL;
	int black=0, white=255; // blackest black and whitest white in the picture
	int QPCorrecture= 256;

	/* Temporary buffers for handling the last row(s) */
	static uint8_t *tempDst= NULL;
	static uint8_t *tempSrc= NULL;

	/* Temporary buffers for handling the last block */
	static uint8_t *tempDstBlock= NULL;
	static uint8_t *tempSrcBlock= NULL;

#ifdef PP_FUNNY_STRIDE
	uint8_t *dstBlockPtrBackup;
	uint8_t *srcBlockPtrBackup;
#endif

#ifdef MORE_TIMING
	long long T0, T1, diffTime=0;
#endif
#ifdef TIMING
	long long memcpyTime=0, vertTime=0, horizTime=0, sumTime;
	sumTime= rdtsc();
#endif
//mode= 0x7F;

	/* lazily allocate the edge/temp buffers on first call (never freed) */
	if(tempDst==NULL)
	{
		tempDst= (uint8_t*)memalign(8, 1024*24);
		tempSrc= (uint8_t*)memalign(8, 1024*24);
		tempDstBlock= (uint8_t*)memalign(8, 1024*24);
		tempSrcBlock= (uint8_t*)memalign(8, 1024*24);
	}

	/* first call: seed the luma histogram with a flat distribution */
	if(!yHistogram)
	{
		int i;
		yHistogram= (uint64_t*)malloc(8*256);
		for(i=0; i<256; i++) yHistogram[i]= width*height/64*15/256;

		if(mode & FULL_Y_RANGE)
		{
			maxAllowedY=255;
			minAllowedY=0;
		}
	}

	if(!isColor)
	{
		/* luma: derive black/white clip points from the histogram of the
		   previous frames and build the packed offset/scale used by
		   blockCopy's LEVEL_FIX path */
		uint64_t sum= 0;
		int i;
		static int framenum= -1;
		uint64_t maxClipped;
		uint64_t clipped;
		double scale;

		framenum++;
		if(framenum == 1) yHistogram[0]= width*height/64*15/256;

		for(i=0; i<256; i++)
		{
			sum+= yHistogram[i];
//			printf("%d ", yHistogram[i]);
		}
//		printf("\n\n");

		/* we allways get a completly black picture first */
		maxClipped= (uint64_t)(sum * maxClippedThreshold);

		/* scan from the top for the brightest level whose tail stays under
		   the clipping budget... */
		clipped= sum;
		for(black=255; black>0; black--)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[black];
		}

		/* ...and from the bottom for the darkest such level */
		clipped= sum;
		for(white=0; white<256; white++)
		{
			if(clipped < maxClipped) break;
			clipped-= yHistogram[white];
		}

		/* replicate the 16 bit offset/scale into all 4 words of the 64 bit
		   globals (|= <<32 fills words 0/2, then |= <<16 fills 1/3) */
		packedYOffset= (black - minAllowedY) & 0xFFFF;
		packedYOffset|= packedYOffset<<32;
		packedYOffset|= packedYOffset<<16;

		/* NOTE(review): white==black would divide by zero here — presumably
		   prevented by the clipping threshold; confirm */
		scale= (double)(maxAllowedY - minAllowedY) / (double)(white-black);

		packedYScale= (uint16_t)(scale*1024.0 + 0.5);
		packedYScale|= packedYScale<<32;
		packedYScale|= packedYScale<<16;
	}
	else
	{
		/* chroma: identity scale (1.0 in 8.8 fixed point per word), no offset */
		packedYScale= 0x0100010001000100LL;
		packedYOffset= 0;
	}

	if(mode & LEVEL_FIX) QPCorrecture= packedYScale &0xFFFF;
	else QPCorrecture= 256;

	/* copy & deinterlace first row of blocks */
	y=-BLOCK_SIZE;
	{
		//1% speedup if these are here instead of the inner loop
		uint8_t *srcBlock= &(src[y*srcStride]);
		uint8_t *dstBlock= &(dst[y*dstStride]);

		/* the first block row is staged through tempDst (there is no line
		   above the picture to read from) */
		dstBlock= tempDst + dstStride;

		// From this point on it is guaranteed that we can read and write 16 lines downward
		// finish 1 block before the next otherwise we'll might have a problem
		// with the L1 Cache of the P4 ... or only a few blocks at a time or something
		for(x=0; x<width; x+=BLOCK_SIZE)
		{
#ifdef HAVE_MMX2
/*
			prefetchnta(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
/*
			prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
*/
			/* prefetch a rotating pair of lines ((x>>2)&6 + 8/9) ahead of
			   the copy below */
			asm(
				"movl %4, %%eax \n\t"
				"shrl $2, %%eax \n\t"
				"andl $6, %%eax \n\t"
				"addl $8, %%eax \n\t"
				"movl %%eax, %%ebx \n\t"
				"imul %1, %%eax \n\t"
				"imul %3, %%ebx \n\t"
				"prefetchnta 32(%%eax, %0) \n\t"
				"prefetcht0 32(%%ebx, %2) \n\t"
				"addl %1, %%eax \n\t"
				"addl %3, %%ebx \n\t"
				"prefetchnta 32(%%eax, %0) \n\t"
				"prefetcht0 32(%%ebx, %2) \n\t"
				:: "r" (srcBlock), "r" (srcStride), "r" (dstBlock), "r" (dstStride),
				"m" (x)
				: "%eax", "%ebx"
			);
#elif defined(HAVE_3DNOW)
//FIXME check if this is faster on an 3dnow chip or if its faster without the prefetch or ...
/*			prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
#endif

			blockCopy(dstBlock + dstStride*8, dstStride,
				srcBlock + srcStride*8, srcStride, 8, mode & LEVEL_FIX);

			/* at most one deinterlacer runs, selected by mode priority */
			if(mode & LINEAR_IPOL_DEINT_FILTER)
				deInterlaceInterpolateLinear(dstBlock, dstStride);
			else if(mode & LINEAR_BLEND_DEINT_FILTER)
				deInterlaceBlendLinear(dstBlock, dstStride);
			else if(mode & MEDIAN_DEINT_FILTER)
				deInterlaceMedian(dstBlock, dstStride);
			else if(mode & CUBIC_IPOL_DEINT_FILTER)
				deInterlaceInterpolateCubic(dstBlock, dstStride);
/*			else if(mode & CUBIC_BLEND_DEINT_FILTER)
				deInterlaceBlendCubic(dstBlock, dstStride);
*/
			dstBlock+=8;
			srcBlock+=8;
		}
		/* flush the staged first row back to the real dst */
		memcpy(&(dst[y*dstStride]) + 8*dstStride, tempDst + 9*dstStride, 8*dstStride );
	}

	for(y=0; y<height; y+=BLOCK_SIZE)
	{
		//1% speedup if these are here instead of the inner loop
		uint8_t *srcBlock= &(src[y*srcStride]);
		uint8_t *dstBlock= &(dst[y*dstStride]);
#ifdef ARCH_X86
		/* QP lookup via 32.32 fixed-point stepping: chroma advances every
		   8 pixels, luma every 16 (see the sbbl trick in the inner loop) */
		int *QPptr= isColor ? &QPs[(y>>3)*QPStride] :&QPs[(y>>4)*QPStride];
		int QPDelta= isColor ? 1<<(32-3) : 1<<(32-4);
		int QPFrac= QPDelta;
		uint8_t *tempBlock1= tempBlocks;
		uint8_t *tempBlock2= tempBlocks + 8;
#endif
		/* can we mess with a 8x16 block from srcBlock/dstBlock downwards and 1 line upwards
		   if not than use a temporary buffer */
		if(y+15 >= height)
		{
			/* copy from line 8 to 15 of src, these will be copied with
			   blockcopy to dst later */
			memcpy(tempSrc + srcStride*8, srcBlock + srcStride*8,
				srcStride*MAX(height-y-8, 0) );

			/* duplicate last line to fill the void upto line 15 */
			if(y+15 >= height)
			{
				int i;
				for(i=height-y; i<=15; i++)
					memcpy(tempSrc + srcStride*i,
						src + srcStride*(height-1), srcStride);
			}

			/* copy up to 9 lines of dst */
			memcpy(tempDst, dstBlock - dstStride, dstStride*MIN(height-y+1, 9) );
			dstBlock= tempDst + dstStride;
			srcBlock= tempSrc;
		}

		// From this point on it is guaranteed that we can read and write 16 lines downward
		// finish 1 block before the next otherwise we'll might have a problem
		// with the L1 Cache of the P4 ... or only a few blocks at a time or something
		for(x=0; x<width; x+=BLOCK_SIZE)
		{
			const int stride= dstStride;
			uint8_t *tmpXchg;
#ifdef ARCH_X86
			int QP= *QPptr;
			/* advance QPFrac by QPDelta; on carry (sbbl -> eax=-1) step
			   QPptr forward by one int (shll $2 = *sizeof(int)) */
			asm volatile(
				"addl %2, %1 \n\t"
				"sbbl %%eax, %%eax \n\t"
				"shll $2, %%eax \n\t"
				"subl %%eax, %0 \n\t"
				: "+r" (QPptr), "+m" (QPFrac)
				: "r" (QPDelta)
				: "%eax"
			);
#else
			int QP= isColor ?
				QPs[(y>>3)*QPStride + (x>>3)]:
				QPs[(y>>4)*QPStride + (x>>4)];
#endif
			if(!isColor)
			{
				/* rescale QP by the level-fix gain and sample one pixel per
				   block into the brightness histogram for the next frame */
				QP= (QP* QPCorrecture)>>8;
				yHistogram[ srcBlock[srcStride*12 + 4] ]++;
			}
#ifdef HAVE_MMX
			/* broadcast QP into every byte of the pQPb global */
			asm volatile(
				"movd %0, %%mm7 \n\t"
				"packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP
				"packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP
				"packuswb %%mm7, %%mm7 \n\t" // QP,..., QP
				"movq %%mm7, pQPb \n\t"
				: : "r" (QP)
			);
#endif
#ifdef MORE_TIMING
			T0= rdtsc();
#endif
#ifdef HAVE_MMX2
/*
			prefetchnta(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
/*
			prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32);
			prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32);
			prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32);
*/
			/* same rotating prefetch as in the first-row loop above */
			asm(
				"movl %4, %%eax \n\t"
				"shrl $2, %%eax \n\t"
				"andl $6, %%eax \n\t"
				"addl $8, %%eax \n\t"
				"movl %%eax, %%ebx \n\t"
				"imul %1, %%eax \n\t"
				"imul %3, %%ebx \n\t"
				"prefetchnta 32(%%eax, %0) \n\t"
				"prefetcht0 32(%%ebx, %2) \n\t"
				"addl %1, %%eax \n\t"
				"addl %3, %%ebx \n\t"
				"prefetchnta 32(%%eax, %0) \n\t"
				"prefetcht0 32(%%ebx, %2) \n\t"
				:: "r" (srcBlock), "r" (srcStride), "r" (dstBlock), "r" (dstStride),
				"m" (x)
				: "%eax", "%ebx"
			);
#elif defined(HAVE_3DNOW)
//FIXME check if this is faster on an 3dnow chip or if its faster without the prefetch or ...
/*			prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
			prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
			prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
*/
#endif
#ifdef PP_FUNNY_STRIDE
			//can we mess with a 8x16 block, if not use a temp buffer, yes again
			if(x+7 >= width)
			{
				int i;
				dstBlockPtrBackup= dstBlock;
				srcBlockPtrBackup= srcBlock;

				for(i=0;i<BLOCK_SIZE*2; i++)
				{
					memcpy(tempSrcBlock+i*srcStride, srcBlock+i*srcStride, width-x);
					memcpy(tempDstBlock+i*dstStride, dstBlock+i*dstStride, width-x);
				}

				dstBlock= tempDstBlock;
				srcBlock= tempSrcBlock;
			}
#endif
			blockCopy(dstBlock + dstStride*8, dstStride,
				srcBlock + srcStride*8, srcStride, 8, mode & LEVEL_FIX);

			/* at most one deinterlacer runs, selected by mode priority */
			if(mode & LINEAR_IPOL_DEINT_FILTER)
				deInterlaceInterpolateLinear(dstBlock, dstStride);
			else if(mode & LINEAR_BLEND_DEINT_FILTER)
				deInterlaceBlendLinear(dstBlock, dstStride);
			else if(mode & MEDIAN_DEINT_FILTER)
				deInterlaceMedian(dstBlock, dstStride);
			else if(mode & CUBIC_IPOL_DEINT_FILTER)
				deInterlaceInterpolateCubic(dstBlock, dstStride);
/*			else if(mode & CUBIC_BLEND_DEINT_FILTER)
				deInterlaceBlendCubic(dstBlock, dstStride);
*/
			/* only deblock if we have 2 blocks */
			if(y + 8 < height)
			{
#ifdef MORE_TIMING
				T1= rdtsc();
				memcpyTime+= T1-T0;
				T0=T1;
#endif
				/* vertical filter across the horizontal block edge */
				if(mode & V_RK1_FILTER)
					vertRK1Filter(dstBlock, stride, QP);
				else if(mode & V_X1_FILTER)
					vertX1Filter(dstBlock, stride, QP);
				else if(mode & V_DEBLOCK)
				{
					/* flat edge -> lowpass (if min/max allows), otherwise
					   the default (gradient) filter */
					if( isVertDC(dstBlock, stride))
					{
						if(isVertMinMaxOk(dstBlock, stride, QP))
							doVertLowPass(dstBlock, stride, QP);
					}
					else
						doVertDefFilter(dstBlock, stride, QP);
				}
#ifdef MORE_TIMING
				T1= rdtsc();
				vertTime+= T1-T0;
				T0=T1;
#endif
			}

#ifdef HAVE_MMX
			/* transpose so the horizontal filters can reuse the vertical
			   MMX implementations on the 16-wide temp blocks */
			transpose1(tempBlock1, tempBlock2, dstBlock, dstStride);
#endif
			/* check if we have a previous block to deblock it with dstBlock */
			if(x - 8 >= 0)
			{
#ifdef MORE_TIMING
				T0= rdtsc();
#endif
#ifdef HAVE_MMX
				/* horizontal filtering done as vertical filtering on the
				   transposed block, then transposed back */
				if(mode & H_RK1_FILTER)
					vertRK1Filter(tempBlock1, 16, QP);
				else if(mode & H_X1_FILTER)
					vertX1Filter(tempBlock1, 16, QP);
				else if(mode & H_DEBLOCK)
				{
					if( isVertDC(tempBlock1, 16))
					{
						if(isVertMinMaxOk(tempBlock1, 16, QP))
							doVertLowPass(tempBlock1, 16, QP);
					}
					else
						doVertDefFilter(tempBlock1, 16, QP);
				}

				transpose2(dstBlock-4, dstStride, tempBlock1 + 4*16);
#else
				if(mode & H_X1_FILTER)
					horizX1Filter(dstBlock-4, stride, QP);
				else if(mode & H_DEBLOCK)
				{
					if( isHorizDC(dstBlock-4, stride))
					{
						if(isHorizMinMaxOk(dstBlock-4, stride, QP))
							doHorizLowPass(dstBlock-4, stride, QP);
					}
					else
						doHorizDefFilter(dstBlock-4, stride, QP);
				}
#endif
#ifdef MORE_TIMING
				T1= rdtsc();
				horizTime+= T1-T0;
				T0=T1;
#endif
				if(mode & DERING)
				{
				//FIXME filter first line
					/* dering lags one block row/column behind so both edges
					   of the block are already deblocked */
					if(y>0) dering(dstBlock - stride - 8, stride, QP);
				}
			}
			else if(mode & DERING)
			{
			 //FIXME y+15 is required cuz of the tempBuffer thing -> bottom right block isnt filtered
				/* x==0: dering the last block of the previous block row */
				if(y > 8 && y+15 < height) dering(dstBlock - stride*9 + width - 8, stride, QP);
			}
#ifdef PP_FUNNY_STRIDE
			/* did we use a tmp-block buffer */
			if(x+7 >= width)
			{
				int i;
				dstBlock= dstBlockPtrBackup;
				srcBlock= srcBlockPtrBackup;

				for(i=0;i<BLOCK_SIZE*2; i++)
				{
					memcpy(dstBlock+i*dstStride, tempDstBlock+i*dstStride, width-x);
				}
			}
#endif

			dstBlock+=8;
			srcBlock+=8;

#ifdef HAVE_MMX
			/* swap the transpose scratch blocks so tempBlock1 always holds
			   the previous column next iteration */
			tmpXchg= tempBlock1;
			tempBlock1= tempBlock2;
			tempBlock2 = tmpXchg;
#endif
		}

		/* did we use a tmp buffer for the last lines*/
		if(y+15 >= height)
		{
			uint8_t *dstBlock= &(dst[y*dstStride]);
			memcpy(dstBlock, tempDst + dstStride, dstStride*(height-y) );
		}
	}
#ifdef HAVE_3DNOW
	asm volatile("femms");
#elif defined (HAVE_MMX)
	asm volatile("emms");
#endif

#ifdef TIMING
	// FIXME diff is mostly the time spent for rdtsc (should subtract that but ...)
	sumTime= rdtsc() - sumTime;
	if(!isColor)
		printf("cpy:%4dk, vert:%4dk, horiz:%4dk, sum:%4dk, diff:%4dk, color: %d/%d \r",
			(int)(memcpyTime/1000), (int)(vertTime/1000), (int)(horizTime/1000),
			(int)(sumTime/1000), (int)((sumTime-memcpyTime-vertTime-horizTime)/1000)
			, black, white);
#endif
}