/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 */
#include "../dsputil.h"
#include "mmx.h"

static const __attribute__ ((aligned(8))) uint64_t round_tab[3]={
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

static attribute_used __attribute__ ((aligned(8))) uint64_t bone= 0x0101010101010101LL;
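
/*
 * The constants above: round_tab[] supplies the per-word rounding bias used
 * by the averaging SAD variants below (index 1 for the >>1 half-pel average,
 * index 2 for the >>2 xy2 average), and bone, 0x01 in every byte, is the
 * correction subtracted in the approximate mmx2 xy2 kernel.
 */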

/* SAD of one 8-wide block: |a-b| is built as (a-b)|(b-a) with unsigned
 * saturation, widened to 16-bit words and accumulated in %mm6; the loop
 * processes two rows per iteration */
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        ".balign 16                    \n\t"
        "1:                            \n\t"
        "movq (%1, %%"REG_a"), %%mm0   \n\t"
        "movq (%2, %%"REG_a"), %%mm2   \n\t"
        "movq (%2, %%"REG_a"), %%mm4   \n\t"
        "add %3, %%"REG_a"             \n\t"
        "psubusb %%mm0, %%mm2          \n\t"
        "psubusb %%mm4, %%mm0          \n\t"
        "movq (%1, %%"REG_a"), %%mm1   \n\t"
        "movq (%2, %%"REG_a"), %%mm3   \n\t"
        "movq (%2, %%"REG_a"), %%mm5   \n\t"
        "psubusb %%mm1, %%mm3          \n\t"
        "psubusb %%mm5, %%mm1          \n\t"
        "por %%mm2, %%mm0              \n\t"
        "por %%mm1, %%mm3              \n\t"
        "movq %%mm0, %%mm1             \n\t"
        "movq %%mm3, %%mm2             \n\t"
        "punpcklbw %%mm7, %%mm0        \n\t"
        "punpckhbw %%mm7, %%mm1        \n\t"
        "punpcklbw %%mm7, %%mm3        \n\t"
        "punpckhbw %%mm7, %%mm2        \n\t"
        "paddw %%mm1, %%mm0            \n\t"
        "paddw %%mm3, %%mm2            \n\t"
        "paddw %%mm2, %%mm0            \n\t"
        "paddw %%mm0, %%mm6            \n\t"
        "add %3, %%"REG_a"             \n\t"
        " js 1b                        \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((long)stride)
    );
}
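
/*
 * Plain-C sketch of what sad8_1_* computes (illustrative only, not part of
 * the original file): the sum of absolute differences over an 8-pixel-wide
 * block of height h.
 */
static inline int sad8_1_ref(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int x, y, d, sum = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++) {
            d = blk1[x] - blk2[x];
            sum += d < 0 ? -d : d;   /* |a - b| per pixel */
        }
        blk1 += stride;
        blk2 += stride;
    }
    return sum;
}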

/* MMX2 version: psadbw sums all eight absolute byte differences at once */
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        ".balign 16                    \n\t"
        "1:                            \n\t"
        "movq (%1, %%"REG_a"), %%mm0   \n\t"
        "movq (%2, %%"REG_a"), %%mm2   \n\t"
        "psadbw %%mm2, %%mm0           \n\t"
        "add %3, %%"REG_a"             \n\t"
        "movq (%1, %%"REG_a"), %%mm1   \n\t"
        "movq (%2, %%"REG_a"), %%mm3   \n\t"
        "psadbw %%mm1, %%mm3           \n\t"
        "paddw %%mm3, %%mm0            \n\t"
        "paddw %%mm0, %%mm6            \n\t"
        "add %3, %%"REG_a"             \n\t"
        " js 1b                        \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((long)stride)
    );
}
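
/*
 * psadbw computes the sum of the eight absolute byte differences in a
 * single instruction and leaves a 16-bit result in the low word of the
 * destination, which is why the mmx2 loops need none of the
 * psubusb/por/punpck arithmetic of the plain MMX versions and why
 * sum_mmx2() below can return %mm6 with a bare movd.
 */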

/* SAD against the pavgb average of two predictions (x2/y2 half-pel cases) */
static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        ".balign 16                    \n\t"
        "1:                            \n\t"
        "movq (%1, %%"REG_a"), %%mm0   \n\t"
        "movq (%2, %%"REG_a"), %%mm2   \n\t"
        "pavgb %%mm2, %%mm0            \n\t"
        "movq (%3, %%"REG_a"), %%mm2   \n\t"
        "psadbw %%mm2, %%mm0           \n\t"
        "add %4, %%"REG_a"             \n\t"
        "movq (%1, %%"REG_a"), %%mm1   \n\t"
        "movq (%2, %%"REG_a"), %%mm3   \n\t"
        "pavgb %%mm1, %%mm3            \n\t"
        "movq (%3, %%"REG_a"), %%mm1   \n\t"
        "psadbw %%mm1, %%mm3           \n\t"
        "paddw %%mm3, %%mm0            \n\t"
        "paddw %%mm0, %%mm6            \n\t"
        "add %4, %%"REG_a"             \n\t"
        " js 1b                        \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" ((long)stride)
    );
}
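
/*
 * Plain-C sketch of what the sad8_2_* kernels compute (illustrative only,
 * not part of the original file): the SAD of blk2 against the rounded
 * average of two predictions. pavgb and the round_tab[1] word arithmetic
 * both implement (a + b + 1) >> 1.
 */
static inline int sad8_2_ref(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
                             int stride, int h)
{
    int x, y, d, sum = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++) {
            d = ((blk1a[x] + blk1b[x] + 1) >> 1) - blk2[x];
            sum += d < 0 ? -d : d;
        }
        blk1a += stride;
        blk1b += stride;
        blk2  += stride;
    }
    return sum;
}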

/* approximate xy2 case: three pavgb plus a bone correction (see note below) */
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{ //FIXME reuse src
    long len= -(stride*h);
    asm volatile(
        ".balign 16                    \n\t"
        "movq "MANGLE(bone)", %%mm5    \n\t"
        "1:                            \n\t"
        "movq (%1, %%"REG_a"), %%mm0   \n\t"
        "movq (%2, %%"REG_a"), %%mm2   \n\t"
        "movq 1(%1, %%"REG_a"), %%mm1  \n\t"
        "movq 1(%2, %%"REG_a"), %%mm3  \n\t"
        "pavgb %%mm2, %%mm0            \n\t"
        "pavgb %%mm1, %%mm3            \n\t"
        "psubusb %%mm5, %%mm3          \n\t"
        "pavgb %%mm3, %%mm0            \n\t"
        "movq (%3, %%"REG_a"), %%mm2   \n\t"
        "psadbw %%mm2, %%mm0           \n\t"
        "add %4, %%"REG_a"             \n\t"
        "movq (%1, %%"REG_a"), %%mm1   \n\t"
        "movq (%2, %%"REG_a"), %%mm3   \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2  \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4  \n\t"
        "pavgb %%mm3, %%mm1            \n\t"
        "pavgb %%mm4, %%mm2            \n\t"
        "psubusb %%mm5, %%mm2          \n\t"
        "pavgb %%mm1, %%mm2            \n\t"
        "movq (%3, %%"REG_a"), %%mm1   \n\t"
        "psadbw %%mm1, %%mm2           \n\t"
        "paddw %%mm2, %%mm0            \n\t"
        "paddw %%mm0, %%mm6            \n\t"
        "add %4, %%"REG_a"             \n\t"
        " js 1b                        \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" ((long)stride)
    );
}
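
/*
 * Note on the kernel above (illustrative sketch, not part of the original
 * file): it only approximates the exact xy2 average (a+b+c+d+2)>>2. Pair
 * averages are taken with pavgb, one intermediate is lowered by bone to
 * compensate for the double rounding, and the result can still be off by
 * one, which is why dsputil_init_pix_mmx() below keeps the exact plain-MMX
 * half-pel kernels when CODEC_FLAG_BITEXACT is set.
 */
static inline int avg4_approx_ref(int a, int b, int c, int d)
{
    int ab = (a + b + 1) >> 1;        /* pavgb */
    int cd = ((c + d + 1) >> 1) - 1;  /* pavgb, then psubusb bone */
    if (cd < 0)
        cd = 0;                       /* psubusb saturates at 0 */
    return (ab + cd + 1) >> 1;        /* final pavgb */
}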

/* plain-MMX half-pel SAD: exact (a+b+round)>>1 in 16-bit words, with the
 * rounding bias preloaded into %mm5 by the PIX_SAD wrappers below */
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        ".balign 16                    \n\t"
        "1:                            \n\t"
        "movq (%1, %%"REG_a"), %%mm0   \n\t"
        "movq (%2, %%"REG_a"), %%mm1   \n\t"
        "movq (%1, %%"REG_a"), %%mm2   \n\t"
        "movq (%2, %%"REG_a"), %%mm3   \n\t"
        "punpcklbw %%mm7, %%mm0        \n\t"
        "punpcklbw %%mm7, %%mm1        \n\t"
        "punpckhbw %%mm7, %%mm2        \n\t"
        "punpckhbw %%mm7, %%mm3        \n\t"
        "paddw %%mm0, %%mm1            \n\t"
        "paddw %%mm2, %%mm3            \n\t"
        "movq (%3, %%"REG_a"), %%mm4   \n\t"
        "movq (%3, %%"REG_a"), %%mm2   \n\t"
        "paddw %%mm5, %%mm1            \n\t"
        "paddw %%mm5, %%mm3            \n\t"
        "psrlw $1, %%mm1               \n\t"
        "psrlw $1, %%mm3               \n\t"
        "packuswb %%mm3, %%mm1         \n\t"
        "psubusb %%mm1, %%mm4          \n\t"
        "psubusb %%mm2, %%mm1          \n\t"
        "por %%mm4, %%mm1              \n\t"
        "movq %%mm1, %%mm0             \n\t"
        "punpcklbw %%mm7, %%mm0        \n\t"
        "punpckhbw %%mm7, %%mm1        \n\t"
        "paddw %%mm1, %%mm0            \n\t"
        "paddw %%mm0, %%mm6            \n\t"
        "add %4, %%"REG_a"             \n\t"
        " js 1b                        \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" ((long)stride)
    );
}
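
/*
 * Exact xy2 average for the plain-MMX path: (a+b+c+d+2)>>2 computed in
 * 16-bit words with the +2 bias preloaded into %mm5 from round_tab[2], so
 * this version is bit-exact, unlike the pavgb approximation above.
 */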
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        ".balign 16                    \n\t"
        "1:                            \n\t"
        "movq (%1, %%"REG_a"), %%mm0   \n\t"
        "movq (%2, %%"REG_a"), %%mm1   \n\t"
        "movq %%mm0, %%mm4             \n\t"
        "movq %%mm1, %%mm2             \n\t"
        "punpcklbw %%mm7, %%mm0        \n\t"
        "punpcklbw %%mm7, %%mm1        \n\t"
        "punpckhbw %%mm7, %%mm4        \n\t"
        "punpckhbw %%mm7, %%mm2        \n\t"
        "paddw %%mm1, %%mm0            \n\t"
        "paddw %%mm2, %%mm4            \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2  \n\t"
        "movq 1(%2, %%"REG_a"), %%mm3  \n\t"
        "movq %%mm2, %%mm1             \n\t"
        "punpcklbw %%mm7, %%mm2        \n\t"
        "punpckhbw %%mm7, %%mm1        \n\t"
        "paddw %%mm0, %%mm2            \n\t"
        "paddw %%mm4, %%mm1            \n\t"
        "movq %%mm3, %%mm4             \n\t"
        "punpcklbw %%mm7, %%mm3        \n\t"
        "punpckhbw %%mm7, %%mm4        \n\t"
        "paddw %%mm3, %%mm2            \n\t"
        "paddw %%mm4, %%mm1            \n\t"
        "movq (%3, %%"REG_a"), %%mm3   \n\t"
        "movq (%3, %%"REG_a"), %%mm4   \n\t"
        "paddw %%mm5, %%mm2            \n\t"
        "paddw %%mm5, %%mm1            \n\t"
        "psrlw $2, %%mm2               \n\t"
        "psrlw $2, %%mm1               \n\t"
        "packuswb %%mm1, %%mm2         \n\t"
        "psubusb %%mm2, %%mm3          \n\t"
        "psubusb %%mm4, %%mm2          \n\t"
        "por %%mm3, %%mm2              \n\t"
        "movq %%mm2, %%mm0             \n\t"
        "punpcklbw %%mm7, %%mm0        \n\t"
        "punpckhbw %%mm7, %%mm2        \n\t"
        "paddw %%mm2, %%mm0            \n\t"
        "paddw %%mm0, %%mm6            \n\t"
        "add %4, %%"REG_a"             \n\t"
        " js 1b                        \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" ((long)stride)
    );
}
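
/*
 * All kernels above leave their running total in %mm6: four 16-bit partial
 * sums for plain MMX, a single 16-bit value per psadbw for mmx2. The two
 * helpers below fold that register into a scalar return value.
 */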
static inline int sum_mmx(void)
{
    int ret;
    asm volatile(
        "movq %%mm6, %%mm0    \n\t"
        "psrlq $32, %%mm6     \n\t"
        "paddw %%mm0, %%mm6   \n\t"
        "movq %%mm6, %%mm0    \n\t"
        "psrlq $16, %%mm6     \n\t"
        "paddw %%mm0, %%mm6   \n\t"
        "movd %%mm6, %0       \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}

static inline int sum_mmx2(void)
{
    int ret;
    asm volatile(
        "movd %%mm6, %0 \n\t"
        : "=r" (ret)
    );
    return ret;
}
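
/*
 * Scalar view of sum_mmx()'s fold (illustrative only): with %mm6 holding
 * the words w0..w3, the two shift+paddw steps leave w0+w1+w2+w3 in the low
 * word; the &0xFFFF discards the partial sums left in the upper lanes.
 */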
#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7    \n\t"\
                 "pxor %%mm6, %%mm6    \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7    \n\t"\
                 "pxor %%mm6, %%mm6    \n\t"\
                 "movq %0, %%mm5       \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7    \n\t"\
                 "pxor %%mm6, %%mm6    \n\t"\
                 "movq %0, %%mm5       \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7    \n\t"\
                 "pxor %%mm6, %%mm6    \n\t"\
                 "movq %0, %%mm5       \n\t"\
                 :: "m"(round_tab[2]) \
                 );\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7    \n\t"\
                 "pxor %%mm6, %%mm6    \n\t":);\
\
    sad8_1_ ## suf(blk1  , blk2  , stride, h);\
    sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7    \n\t"\
                 "pxor %%mm6, %%mm6    \n\t"\
                 "movq %0, %%mm5       \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+1, blk2  , stride, h);\
    sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7    \n\t"\
                 "pxor %%mm6, %%mm6    \n\t"\
                 "movq %0, %%mm5       \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+stride,   blk2  , stride, h);\
    sad8_2_ ## suf(blk1+8, blk1+stride+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7    \n\t"\
                 "pxor %%mm6, %%mm6    \n\t"\
                 "movq %0, %%mm5       \n\t"\
                 :: "m"(round_tab[2]) \
                 );\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, h);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\

PIX_SAD(mmx)
PIX_SAD(mmx2)
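
/*
 * The two instantiations above expand PIX_SAD() into the full _mmx and
 * _mmx2 families (sad8/sad16 plus their x2/y2/xy2 half-pel variants) that
 * are registered in the DSPContext function-pointer tables below.
 */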
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
    if (mm_flags & MM_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0]= sad16_mmx;
        c->sad[1]= sad8_mmx;
    }
    if (mm_flags & MM_MMXEXT) {
        c->pix_abs[0][0] = sad16_mmx2;
        c->pix_abs[1][0] = sad8_mmx2;

        c->sad[0]= sad16_mmx2;
        c->sad[1]= sad8_mmx2;

        /* skip the half-pel mmx2 kernels when bit-exact output is required */
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->pix_abs[0][1] = sad16_x2_mmx2;
            c->pix_abs[0][2] = sad16_y2_mmx2;
            c->pix_abs[0][3] = sad16_xy2_mmx2;
            c->pix_abs[1][1] = sad8_x2_mmx2;
            c->pix_abs[1][2] = sad8_y2_mmx2;
            c->pix_abs[1][3] = sad8_xy2_mmx2;
        }
    }
}