/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dct.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "dsputil_x86.h"

void ff_get_pixels_mmx(int16_t *block, const uint8_t *pixels, int line_size);
void ff_get_pixels_sse2(int16_t *block, const uint8_t *pixels, int line_size);
void ff_diff_pixels_mmx(int16_t *block, const uint8_t *s1, const uint8_t *s2,
                        int stride);

#if HAVE_INLINE_ASM

static int sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                    int line_size, int h)
{
    int tmp;

    __asm__ volatile (
        "movl %4, %%ecx \n"
        "shr $1, %%ecx \n"
        "pxor %%mm0, %%mm0 \n"          /* mm0 = 0 */
        "pxor %%mm7, %%mm7 \n"          /* mm7 holds the sum */
        "1: \n"
        "movq (%0), %%mm1 \n"           /* mm1 = pix1[0][0 - 7] */
        "movq (%1), %%mm2 \n"           /* mm2 = pix2[0][0 - 7] */
        "movq (%0, %3), %%mm3 \n"       /* mm3 = pix1[1][0 - 7] */
        "movq (%1, %3), %%mm4 \n"       /* mm4 = pix2[1][0 - 7] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1, %%mm5 \n"
        "movq %%mm3, %%mm6 \n"
        "psubusb %%mm2, %%mm1 \n"
        "psubusb %%mm4, %%mm3 \n"
        "psubusb %%mm5, %%mm2 \n"
        "psubusb %%mm6, %%mm4 \n"
        "por %%mm1, %%mm2 \n"
        "por %%mm3, %%mm4 \n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2, %%mm1 \n"
        "movq %%mm4, %%mm3 \n"
        "punpckhbw %%mm0, %%mm2 \n"
        "punpckhbw %%mm0, %%mm4 \n"
        "punpcklbw %%mm0, %%mm1 \n"     /* mm1 now spread over (mm1, mm2) */
        "punpcklbw %%mm0, %%mm3 \n"     /* mm4 now spread over (mm3, mm4) */
        "pmaddwd %%mm2, %%mm2 \n"
        "pmaddwd %%mm4, %%mm4 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm3, %%mm3 \n"
        "lea (%0, %3, 2), %0 \n"        /* pix1 += 2 * line_size */
        "lea (%1, %3, 2), %1 \n"        /* pix2 += 2 * line_size */
        "paddd %%mm2, %%mm1 \n"
        "paddd %%mm4, %%mm3 \n"
        "paddd %%mm1, %%mm7 \n"
        "paddd %%mm3, %%mm7 \n"
        "decl %%ecx \n"
        "jnz 1b \n"
        "movq %%mm7, %%mm1 \n"
        "psrlq $32, %%mm7 \n"           /* shift hi dword to lo */
        "paddd %%mm7, %%mm1 \n"
        "movd %%mm1, %2 \n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp;
}
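
/* For orientation: sse8_mmx computes the sum of squared differences over
 * an 8-pixel-wide block, two rows per loop iteration; sse16_mmx below is
 * the 16-pixel-wide analogue.  A plain-C sketch of the same computation
 * (reference only, deliberately kept out of the build):
 */
#if 0
static int sse8_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int x, y, sum = 0;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++) {
            int d = pix1[x] - pix2[x];
            sum  += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}
#endif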

static int sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                     int line_size, int h)
{
    int tmp;

    __asm__ volatile (
        "movl %4, %%ecx\n"
        "pxor %%mm0, %%mm0\n"           /* mm0 = 0 */
        "pxor %%mm7, %%mm7\n"           /* mm7 holds the sum */
        "1:\n"
        "movq (%0), %%mm1\n"            /* mm1 = pix1[0 - 7] */
        "movq (%1), %%mm2\n"            /* mm2 = pix2[0 - 7] */
        "movq 8(%0), %%mm3\n"           /* mm3 = pix1[8 - 15] */
        "movq 8(%1), %%mm4\n"           /* mm4 = pix2[8 - 15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1, %%mm5\n"
        "movq %%mm3, %%mm6\n"
        "psubusb %%mm2, %%mm1\n"
        "psubusb %%mm4, %%mm3\n"
        "psubusb %%mm5, %%mm2\n"
        "psubusb %%mm6, %%mm4\n"
        "por %%mm1, %%mm2\n"
        "por %%mm3, %%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2, %%mm1\n"
        "movq %%mm4, %%mm3\n"
        "punpckhbw %%mm0, %%mm2\n"
        "punpckhbw %%mm0, %%mm4\n"
        "punpcklbw %%mm0, %%mm1\n"      /* mm1 now spread over (mm1, mm2) */
        "punpcklbw %%mm0, %%mm3\n"      /* mm4 now spread over (mm3, mm4) */
        "pmaddwd %%mm2, %%mm2\n"
        "pmaddwd %%mm4, %%mm4\n"
        "pmaddwd %%mm1, %%mm1\n"
        "pmaddwd %%mm3, %%mm3\n"
        "add %3, %0\n"
        "add %3, %1\n"
        "paddd %%mm2, %%mm1\n"
        "paddd %%mm4, %%mm3\n"
        "paddd %%mm1, %%mm7\n"
        "paddd %%mm3, %%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7, %%mm1\n"
        "psrlq $32, %%mm7\n"            /* shift hi dword to lo */
        "paddd %%mm7, %%mm1\n"
        "movd %%mm1, %2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp;
}

static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h)
{
    int tmp;

    __asm__ volatile (
        "movl %3, %%ecx\n"
        "pxor %%mm7, %%mm7\n"
        "pxor %%mm6, %%mm6\n"

        "movq (%0), %%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm0\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm2\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2, %0\n"

        "movq (%0), %%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm4\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm5\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2, %0\n"
        "1:\n"

        "movq (%0), %%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm0\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm2\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2, %0\n"

        "movq (%0), %%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm4\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm5\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2, %0\n"
        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7, %%mm0\n"
        "punpckhwd %%mm7, %%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "movq %%mm6, %%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6, %%mm0\n"
        "movd %%mm0, %1\n"
        : "+r" (pix1), "=r" (tmp)
        : "r" ((x86_reg) line_size), "g" (h - 2)
        : "%ecx");

    return tmp;
}
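
/* hf_noise8_mmx estimates high-frequency noise as the total absolute
 * change, between vertically adjacent rows, of the horizontal gradient.
 * A scalar sketch of the idea (reference only, not built; the asm
 * handles the edges of each 8-byte group slightly differently):
 */
#if 0
static int hf_noise8_ref(uint8_t *pix, int line_size, int h)
{
    int x, y, sum = 0;

    for (y = 0; y < h - 1; y++) {
        for (x = 0; x < 7; x++) {
            int g0 = pix[x] - pix[x + 1];
            int g1 = pix[x + line_size] - pix[x + line_size + 1];
            sum   += FFABS(g0 - g1);
        }
        pix += line_size;
    }
    return sum;
}
#endif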

static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h)
{
    int tmp;
    uint8_t *pix = pix1;

    __asm__ volatile (
        "movl %3, %%ecx\n"
        "pxor %%mm7, %%mm7\n"
        "pxor %%mm6, %%mm6\n"

        "movq (%0), %%mm0\n"
        "movq 1(%0), %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm0\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm2\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2, %0\n"

        "movq (%0), %%mm4\n"
        "movq 1(%0), %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm4\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm5\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2, %0\n"
        "1:\n"

        "movq (%0), %%mm0\n"
        "movq 1(%0), %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm0\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm2\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2, %0\n"

        "movq (%0), %%mm4\n"
        "movq 1(%0), %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm4\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm5\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2, %0\n"
        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7, %%mm0\n"
        "punpckhwd %%mm7, %%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "movq %%mm6, %%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6, %%mm0\n"
        "movd %%mm0, %1\n"
        : "+r" (pix1), "=r" (tmp)
        : "r" ((x86_reg) line_size), "g" (h - 2)
        : "%ecx");

    return tmp + hf_noise8_mmx(pix + 8, line_size, h);
}

static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                      int line_size, int h)
{
    int score1, score2;

    if (c)
        score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else
        score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2 = hf_noise16_mmx(pix1, line_size, h) -
             hf_noise16_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                     int line_size, int h)
{
    int score1 = sse8_mmx(c, pix1, pix2, line_size, h);
    int score2 = hf_noise8_mmx(pix1, line_size, h) -
                 hf_noise8_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}
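
/* Both NSSE variants implement
 *     score = sse(pix1, pix2) + weight * |hf_noise(pix1) - hf_noise(pix2)|
 * where weight is nsse_weight from the codec context, or 8 when no
 * context is available.  A block that gains or loses texture is thus
 * penalized even when its plain squared error is small. */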

static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
                            int line_size, int h)
{
    int tmp;

    assert((((intptr_t) pix) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1)               \
    "movq (%0), %%mm2\n"                        \
    "movq 8(%0), %%mm3\n"                       \
    "add %2,%0\n"                               \
    "movq %%mm2, " #out0 "\n"                   \
    "movq %%mm3, " #out1 "\n"                   \
    "psubusb " #in0 ", %%mm2\n"                 \
    "psubusb " #in1 ", %%mm3\n"                 \
    "psubusb " #out0 ", " #in0 "\n"             \
    "psubusb " #out1 ", " #in1 "\n"             \
    "por %%mm2, " #in0 "\n"                     \
    "por %%mm3, " #in1 "\n"                     \
    "movq " #in0 ", %%mm2\n"                    \
    "movq " #in1 ", %%mm3\n"                    \
    "punpcklbw %%mm7, " #in0 "\n"               \
    "punpcklbw %%mm7, " #in1 "\n"               \
    "punpckhbw %%mm7, %%mm2\n"                  \
    "punpckhbw %%mm7, %%mm3\n"                  \
    "paddw " #in1 ", " #in0 "\n"                \
    "paddw %%mm3, %%mm2\n"                      \
    "paddw %%mm2, " #in0 "\n"                   \
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pxor %%mm7, %%mm7\n"
        "movq (%0), %%mm0\n"
        "movq 8(%0), %%mm1\n"
        "add %2, %0\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6, %%mm0\n"
        "movq %%mm0, %%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6, %%mm0\n"
        "movd %%mm0, %1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp & 0xFFFF;
}
#undef SUM
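
/* vsad_intra16 sums the absolute vertical gradient of a 16-pixel-wide
 * block, comparing each row against the previous one.  Scalar sketch
 * (reference only, not built):
 */
#if 0
static int vsad_intra16_ref(uint8_t *pix, int line_size, int h)
{
    int x, y, sum = 0;

    for (y = 0; y < h - 1; y++) {
        for (x = 0; x < 16; x++)
            sum += FFABS(pix[x] - pix[x + line_size]);
        pix += line_size;
    }
    return sum;
}
#endif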

static int vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
                               int line_size, int h)
{
    int tmp;

    assert((((intptr_t) pix) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1)               \
    "movq (%0), " #out0 "\n"                    \
    "movq 8(%0), " #out1 "\n"                   \
    "add %2, %0\n"                              \
    "psadbw " #out0 ", " #in0 "\n"              \
    "psadbw " #out1 ", " #in1 "\n"              \
    "paddw " #in1 ", " #in0 "\n"                \
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pxor %%mm7, %%mm7\n"
        "movq (%0), %%mm0\n"
        "movq 8(%0), %%mm1\n"
        "add %2, %0\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6, %1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp;
}
#undef SUM

static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      int line_size, int h)
{
    int tmp;

    assert((((intptr_t) pix1) & 7) == 0);
    assert((((intptr_t) pix2) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1)               \
    "movq (%0), %%mm2\n"                        \
    "movq (%1), " #out0 "\n"                    \
    "movq 8(%0), %%mm3\n"                       \
    "movq 8(%1), " #out1 "\n"                   \
    "add %3, %0\n"                              \
    "add %3, %1\n"                              \
    "psubb " #out0 ", %%mm2\n"                  \
    "psubb " #out1 ", %%mm3\n"                  \
    "pxor %%mm7, %%mm2\n"                       \
    "pxor %%mm7, %%mm3\n"                       \
    "movq %%mm2, " #out0 "\n"                   \
    "movq %%mm3, " #out1 "\n"                   \
    "psubusb " #in0 ", %%mm2\n"                 \
    "psubusb " #in1 ", %%mm3\n"                 \
    "psubusb " #out0 ", " #in0 "\n"             \
    "psubusb " #out1 ", " #in1 "\n"             \
    "por %%mm2, " #in0 "\n"                     \
    "por %%mm3, " #in1 "\n"                     \
    "movq " #in0 ", %%mm2\n"                    \
    "movq " #in1 ", %%mm3\n"                    \
    "punpcklbw %%mm7, " #in0 "\n"               \
    "punpcklbw %%mm7, " #in1 "\n"               \
    "punpckhbw %%mm7, %%mm2\n"                  \
    "punpckhbw %%mm7, %%mm3\n"                  \
    "paddw " #in1 ", " #in0 "\n"                \
    "paddw %%mm3, %%mm2\n"                      \
    "paddw %%mm2, " #in0 "\n"                   \
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pcmpeqw %%mm7, %%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0), %%mm0\n"
        "movq (%1), %%mm2\n"
        "movq 8(%0), %%mm1\n"
        "movq 8(%1), %%mm3\n"
        "add %3, %0\n"
        "add %3, %1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6, %%mm0\n"
        "movq %%mm0, %%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6, %%mm0\n"
        "movd %%mm0, %2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp & 0x7FFF;
}
#undef SUM
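
/* The inter variant applies the same vertical-gradient SAD to the
 * residual pix1 - pix2.  The 0x80 constant built in mm7 (pcmpeqw /
 * psllw $15 / packsswb) biases the wrapped byte difference so the
 * unsigned-saturation abs trick still works.  Scalar sketch (reference
 * only; the asm operates on mod-256 byte differences):
 */
#if 0
static int vsad16_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int x, y, sum = 0;

    for (y = 0; y < h - 1; y++) {
        for (x = 0; x < 16; x++) {
            int d0 = pix1[x] - pix2[x];
            int d1 = pix1[x + line_size] - pix2[x + line_size];
            sum   += FFABS(d0 - d1);
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}
#endif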

static int vsad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
{
    int tmp;

    assert((((intptr_t) pix1) & 7) == 0);
    assert((((intptr_t) pix2) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1)               \
    "movq (%0), " #out0 "\n"                    \
    "movq (%1), %%mm2\n"                        \
    "movq 8(%0), " #out1 "\n"                   \
    "movq 8(%1), %%mm3\n"                       \
    "add %3, %0\n"                              \
    "add %3, %1\n"                              \
    "psubb %%mm2, " #out0 "\n"                  \
    "psubb %%mm3, " #out1 "\n"                  \
    "pxor %%mm7, " #out0 "\n"                   \
    "pxor %%mm7, " #out1 "\n"                   \
    "psadbw " #out0 ", " #in0 "\n"              \
    "psadbw " #out1 ", " #in1 "\n"              \
    "paddw " #in1 ", " #in0 "\n"                \
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pcmpeqw %%mm7, %%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0), %%mm0\n"
        "movq (%1), %%mm2\n"
        "movq 8(%0), %%mm1\n"
        "movq 8(%1), %%mm3\n"
        "add %3, %0\n"
        "add %3, %1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6, %2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp;
}
#undef SUM

#define MMABS_MMX(a, z)                         \
    "pxor " #z ", " #z " \n\t"                  \
    "pcmpgtw " #a ", " #z " \n\t"               \
    "pxor " #z ", " #a " \n\t"                  \
    "psubw " #z ", " #a " \n\t"

#define MMABS_MMXEXT(a, z)                      \
    "pxor " #z ", " #z " \n\t"                  \
    "psubw " #a ", " #z " \n\t"                 \
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a, z)                       \
    "pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a, z, sum)                    \
    MMABS(a, z)                                 \
    "paddusw " #a ", " #sum " \n\t"

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get
 * up to about 100k on extreme inputs. But that's very unlikely to occur in
 * natural video, and it's even more unlikely to not have any alternative
 * mvs/modes with lower cost. */
#define HSUM_MMX(a, t, dst)                     \
    "movq " #a ", " #t " \n\t"                  \
    "psrlq $32, " #a " \n\t"                    \
    "paddusw " #t ", " #a " \n\t"               \
    "movq " #a ", " #t " \n\t"                  \
    "psrlq $16, " #a " \n\t"                    \
    "paddusw " #t ", " #a " \n\t"               \
    "movd " #a ", " #dst " \n\t"

#define HSUM_MMXEXT(a, t, dst)                  \
    "pshufw $0x0E, " #a ", " #t " \n\t"         \
    "paddusw " #t ", " #a " \n\t"               \
    "pshufw $0x01, " #a ", " #t " \n\t"         \
    "paddusw " #t ", " #a " \n\t"               \
    "movd " #a ", " #dst " \n\t"

#define HSUM_SSE2(a, t, dst)                    \
    "movhlps " #a ", " #t " \n\t"               \
    "paddusw " #t ", " #a " \n\t"               \
    "pshuflw $0x0E, " #a ", " #t " \n\t"        \
    "paddusw " #t ", " #a " \n\t"               \
    "pshuflw $0x01, " #a ", " #t " \n\t"        \
    "paddusw " #t ", " #a " \n\t"               \
    "movd " #a ", " #dst " \n\t"

#define DCT_SAD4(m, mm, o)                      \
    "mov" #m " " #o " + 0(%1), " #mm "2 \n\t"   \
    "mov" #m " " #o " + 16(%1), " #mm "3 \n\t"  \
    "mov" #m " " #o " + 32(%1), " #mm "4 \n\t"  \
    "mov" #m " " #o " + 48(%1), " #mm "5 \n\t"  \
    MMABS_SUM(mm ## 2, mm ## 6, mm ## 0)        \
    MMABS_SUM(mm ## 3, mm ## 7, mm ## 1)        \
    MMABS_SUM(mm ## 4, mm ## 6, mm ## 0)        \
    MMABS_SUM(mm ## 5, mm ## 7, mm ## 1)

#define DCT_SAD_MMX                             \
    "pxor %%mm0, %%mm0 \n\t"                    \
    "pxor %%mm1, %%mm1 \n\t"                    \
    DCT_SAD4(q, %%mm, 0)                        \
    DCT_SAD4(q, %%mm, 8)                        \
    DCT_SAD4(q, %%mm, 64)                       \
    DCT_SAD4(q, %%mm, 72)                       \
    "paddusw %%mm1, %%mm0 \n\t"                 \
    HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2                            \
    "pxor %%xmm0, %%xmm0 \n\t"                  \
    "pxor %%xmm1, %%xmm1 \n\t"                  \
    DCT_SAD4(dqa, %%xmm, 0)                     \
    DCT_SAD4(dqa, %%xmm, 64)                    \
    "paddusw %%xmm1, %%xmm0 \n\t"               \
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu)                           \
static int sum_abs_dctelem_ ## cpu(int16_t *block)  \
{                                                   \
    int sum;                                        \
    __asm__ volatile (                              \
        DCT_SAD                                     \
        : "=r" (sum)                                \
        : "r" (block));                             \
    return sum & 0xFFFF;                            \
}
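
/* Every DCT_SAD_FUNC instance computes the (saturating) sum of absolute
 * values over the 64 coefficients of one DCT block.  Scalar sketch of
 * the unsaturated equivalent (reference only, not built):
 */
#if 0
static int sum_abs_dctelem_ref(int16_t *block)
{
    int i, sum = 0;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);
    return sum & 0xFFFF;
}
#endif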

#define DCT_SAD         DCT_SAD_MMX
#define HSUM(a, t, dst) HSUM_MMX(a, t, dst)
#define MMABS(a, z)     MMABS_MMX(a, z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a, t, dst) HSUM_MMXEXT(a, t, dst)
#define MMABS(a, z)     MMABS_MMXEXT(a, z)
DCT_SAD_FUNC(mmxext)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD         DCT_SAD_SSE2
#define HSUM(a, t, dst) HSUM_SSE2(a, t, dst)
DCT_SAD_FUNC(sse2)
#undef MMABS

#if HAVE_SSSE3_INLINE
#define MMABS(a, z)     MMABS_SSSE3(a, z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#endif
#undef HSUM
#undef DCT_SAD

#endif /* HAVE_INLINE_ASM */

int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int line_size, int h);

#define hadamard_func(cpu)                                            \
    int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,   \
                                  uint8_t *src2, int stride, int h);  \
    int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
                                    uint8_t *src2, int stride, int h);

hadamard_func(mmx)
hadamard_func(mmxext)
hadamard_func(sse2)
hadamard_func(ssse3)

av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx,
                                    unsigned high_bit_depth)
{
    int cpu_flags      = av_get_cpu_flags();
    const int dct_algo = avctx->dct_algo;

    if (EXTERNAL_MMX(cpu_flags)) {
        if (!high_bit_depth)
            c->get_pixels = ff_get_pixels_mmx;
        c->diff_pixels = ff_diff_pixels_mmx;
    }

    if (EXTERNAL_SSE2(cpu_flags))
        if (!high_bit_depth)
            c->get_pixels = ff_get_pixels_sse2;

#if HAVE_INLINE_ASM
    if (INLINE_MMX(cpu_flags)) {
        if (!high_bit_depth &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
            c->fdct = ff_fdct_mmx;

        c->sum_abs_dctelem = sum_abs_dctelem_mmx;

        c->sse[0]  = sse16_mmx;
        c->sse[1]  = sse8_mmx;
        c->vsad[4] = vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->vsad[0] = vsad16_mmx;
        }
    }

    if (INLINE_MMXEXT(cpu_flags)) {
        if (!high_bit_depth &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
            c->fdct = ff_fdct_mmxext;

        c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
        c->vsad[4]         = vsad_intra16_mmxext;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->vsad[0] = vsad16_mmxext;
        }
    }

    if (INLINE_SSE2(cpu_flags)) {
        if (!high_bit_depth &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
            c->fdct = ff_fdct_sse2;

        c->sum_abs_dctelem = sum_abs_dctelem_sse2;
    }

#if HAVE_SSSE3_INLINE
    if (INLINE_SSSE3(cpu_flags)) {
        c->sum_abs_dctelem = sum_abs_dctelem_ssse3;
    }
#endif
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
    }

    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
    }

    if (EXTERNAL_SSE2(cpu_flags)) {
        c->sse[0] = ff_sse16_sse2;

#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
        c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
    }

    if (EXTERNAL_SSSE3(cpu_flags) && HAVE_ALIGNED_STACK) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
        c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
    }

    ff_dsputil_init_pix_mmx(c, avctx);
}