/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/mathops.h"
#include "dsputil_mmx.h"
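
/* Read an 8x8 block of unsigned bytes, two rows per iteration, and
 * widen it to the 16-bit DCTELEMs the DCT expects, using mm7 as the
 * zero register for the byte->word unpacks. The block pointer is
 * biased by +64 elements so REG_a can count from -128 bytes up to 0. */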
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    __asm__ volatile(
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a") \n\t"
        "movq %%mm1, 8(%1, %%"REG_a") \n\t"
        "movq %%mm2, 16(%1, %%"REG_a") \n\t"
        "movq %%mm3, 24(%1, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add $32, %%"REG_a" \n\t"
        "js 1b \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*2)
        : "%"REG_a
    );
}
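
/* Same byte->DCTELEM expansion, but in XMM registers: eight bytes per
 * row, four rows per batch, fully unrolled over the 8 rows (SSE2). */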
static void get_pixels_sse2(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    __asm__ volatile(
        "pxor %%xmm4, %%xmm4 \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "lea (%0,%2,4), %0 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, (%1) \n\t"
        "movdqa %%xmm1, 16(%1) \n\t"
        "movdqa %%xmm2, 32(%1) \n\t"
        "movdqa %%xmm3, 48(%1) \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, 64(%1) \n\t"
        "movdqa %%xmm1, 80(%1) \n\t"
        "movdqa %%xmm2, 96(%1) \n\t"
        "movdqa %%xmm3, 112(%1) \n\t"
        : "+r" (pixels)
        : "r" (block), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3)
    );
}
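
/* block[i] = s1[i] - s2[i] widened to 16 bits, for an 8x8 block; this
 * is what feeds the DCT in inter (residual) coding. */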
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    __asm__ volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm1, 8(%2, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add %3, %1 \n\t"
        "add $16, %%"REG_a" \n\t"
        "jnz 1b \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((x86_reg)stride)
        : "%"REG_a
    );
}
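
/* Sum of all 256 pixels of a 16x16 block. Each word lane accumulates
 * at most 16*4*255, and the final total (<= 65280) still fits in 16
 * bits, hence the word-wide horizontal reduction and the final andl. */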
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    x86_reg index= -line_size*h;

    __asm__ volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "1: \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "add %3, %1 \n\t"
        " js 1b \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((x86_reg)line_size)
    );
    return sum;
}
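
/* Sum of squares of all pixels of a 16x16 block, accumulated as
 * dwords via pmaddwd. */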
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    __asm__ volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */
        "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */
        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
        "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
        "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */
        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"
        "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                            pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"
        "add %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "dec %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size)
        : "%ecx" );
    return tmp;
}
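
/* Sum of squared errors between an 8-pixel-wide (sse8) or 16-pixel-wide
 * (sse16) pair of blocks: the absolute byte difference is formed with
 * the classic saturating psubusb/psubusb/por trick, then squared and
 * accumulated with pmaddwd. */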
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %4,%%ecx\n"
        "shr $1,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n"    /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n"    /* mm4 = pix2[1][0-7] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "lea (%0,%3,2), %0\n"     /* pix1 += 2*line_size */
        "lea (%1,%3,2), %1\n"     /* pix2 += 2*line_size */
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}

int ff_sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h);
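
/* hf_noise8/hf_noise16 estimate the high-frequency "noise" of a block:
 * the sum of absolute row-to-row changes of the horizontal pixel
 * gradient (the 8-wide version forms the gradient with 64-bit shifts,
 * the 16-wide one with an unaligned load at +1). The nsse comparators
 * below weight the difference of these estimates between pictures. */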
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"
        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "add %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "add %2,%0\n"
        "1:\n"
        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "add %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
        : "%ecx");
    return tmp;
}
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    uint8_t * pix= pix1;
    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"
        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "add %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "add %2,%0\n"
        "1:\n"
        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "add %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
        : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}
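
/* Noise-preserving SSE: bias the plain SSE score by the (weighted)
 * difference in high-frequency noise, so candidates that keep the
 * source's texture level are preferred over ones that smear it. */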
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1, score2;

    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}
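
/* Intra vertical SAD: sum of absolute differences between each row of
 * a 16-pixel-wide block and the row above it. */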
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "add %2,%0\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "add %2,%0\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM
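
/* Inter vertical SAD, taken on the residual pix1-pix2. mm7 is built as
 * 0x80 in every byte (pcmpeqw/psllw/packsswb) and xored in to bias the
 * signed byte differences so the unsigned abs-diff trick applies. */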
static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM
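
/* dst[i] = src1[i] - src2[i] over w bytes: 16 bytes per asm iteration,
 * with the C loop finishing the remaining (up to 15) bytes. */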
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}
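
/* HuffYUV median predictor subtraction: dst = src2 - median(L, T,
 * L + T - LT), eight pixels at a time with pminub/pmaxub (MMX2). The
 * first pixel and the left/left_top state are handled in C below. */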
static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top){
    x86_reg i=0;
    uint8_t l, lt;

    __asm__ volatile(
        "1: \n\t"
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "movq (%1, %0), %%mm1 \n\t"   // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t"   // X
        "movq %%mm2, %%mm4 \n\t"      // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t"     // L + T - LT
        "movq %%mm4, %%mm5 \n\t"      // L
        "pmaxub %%mm1, %%mm4 \n\t"    // max(T, L)
        "pminub %%mm5, %%mm1 \n\t"    // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t"     // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        "add $8, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left    = src2[w-1];
}
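
/* MMABS_*: absolute value of packed words, one flavor per instruction
 * set: sign mask via pcmpgtw (MMX), pmaxsw against the negation (MMX2),
 * native pabsw (SSSE3). HSUM_* horizontally sums the word lanes of a
 * register into a GPR; note the 64k saturation in the FIXME below. */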
#define MMABS_MMX(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a,z)\
    "pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    MMABS(a,z)\
    "paddusw " #a ", " #sum " \n\t"

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
 * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
 * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */
#define HSUM_MMX(a, t, dst)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movq "#a", "#t" \n\t"\
    "psrlq $16, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_MMX2(a, t, dst)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshufw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_SSE2(a, t, dst)\
    "movhlps "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu  (void *s, uint8_t *src1, uint8_t *src2, \
                              int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \
                              int stride, int h);

hadamard_func(mmx)
hadamard_func(mmx2)
hadamard_func(sse2)
hadamard_func(ssse3)
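
/* sum_abs_dctelem: sum of |coefficient| over a 64-element DCT block.
 * Each DCT_SAD4 invocation handles four loads at offsets o, o+16,
 * o+32 and o+48; the MMX flavor needs four invocations (two 8-byte
 * columns), the SSE2 flavor two. */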
#define DCT_SAD4(m,mm,o)\
    "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
    "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
    "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
    "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)\

#define DCT_SAD_MMX\
    "pxor %%mm0, %%mm0 \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    DCT_SAD4(q, %%mm, 0)\
    DCT_SAD4(q, %%mm, 8)\
    DCT_SAD4(q, %%mm, 64)\
    DCT_SAD4(q, %%mm, 72)\
    "paddusw %%mm1, %%mm0 \n\t"\
    HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0 \n\t"\
    "pxor %%xmm1, %%xmm1 \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0 \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(DCTELEM *block){\
    int sum;\
    __asm__ volatile(\
        DCT_SAD\
        :"=r"(sum)\
        :"r"(block)\
    );\
    return sum&0xFFFF;\
}

#define DCT_SAD       DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z)    MMABS_MMX(a,z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
#define MMABS(a,z)    MMABS_MMX2(a,z)
DCT_SAD_FUNC(mmx2)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD       DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
DCT_SAD_FUNC(sse2)
#undef MMABS

#if HAVE_SSSE3
#define MMABS(a,z)    MMABS_SSSE3(a,z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#endif
#undef HSUM
#undef DCT_SAD
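
/* Sum of squared differences between an int8_t and an int16_t array;
 * the int8 values are sign-extended to words with punpck + psraw $8
 * (the interleave garbage lands in the low byte and is shifted out). */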
static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
    int sum;
    x86_reg i=size;
    __asm__ volatile(
        "pxor %%mm4, %%mm4 \n"
        "1: \n"
        "sub $8, %0 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n"
        "punpcklbw %%mm2, %%mm2 \n"
        "psraw $8, %%mm3 \n"
        "psraw $8, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "jg 1b \n"
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        "movd %%mm4, %1 \n"
        :"+r"(i), "=r"(sum)
        :"r"(pix1), "r"(pix2)
    );
    return sum;
}
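
/* Building blocks for the QNS template included three times below:
 * PHADDD adds the two dwords of a register, and PMULHRW is a rounded
 * multiply-high, emulated on plain MMX with pmulhw plus an add/shift
 * and native on 3DNow! (pmulhrw) and SSSE3 (pmulhrsw). SET_RND and
 * SCALE_OFFSET adapt the shared template to each variant's rounding
 * and effective shift. */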
#define PHADDD(a, t)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddd "#t", "#a" \n\t"

/*
   pmulhw:   dst[0-15] = (src[0-15] * dst[0-15])[16-31]
   pmulhrw:  dst[0-15] = (src[0-15] * dst[0-15] + 0x8000)[16-31]
   pmulhrsw: dst[0-15] = (src[0-15] * dst[0-15] + 0x4000)[15-30]
 */
#define PMULHRW(x, y, s, o)\
    "pmulhw " #s ", "#x " \n\t"\
    "pmulhw " #s ", "#y " \n\t"\
    "paddw " #o ", "#x " \n\t"\
    "paddw " #o ", "#y " \n\t"\
    "psraw $1, "#x " \n\t"\
    "psraw $1, "#y " \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_mmx_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#define DEF(x) x ## _3dnow
#define SET_RND(x)
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
    "pmulhrw " #s ", "#x " \n\t"\
    "pmulhrw " #s ", "#y " \n\t"

#include "dsputil_mmx_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#if HAVE_SSSE3
#undef PHADDD
#define DEF(x) x ## _ssse3
#define SET_RND(x)
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
#define PMULHRW(x, y, s, o)\
    "pmulhrsw " #s ", "#x " \n\t"\
    "pmulhrsw " #s ", "#y " \n\t"

#include "dsputil_mmx_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW
#undef PHADDD
#endif //HAVE_SSSE3
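
/* Install the MMX/MMX2/SSE2/SSSE3/3DNow! implementations according to
 * the CPU flags; functions that are not bit-exact with the C reference
 * are skipped when CODEC_FLAG_BITEXACT is set, and bit depths above 8
 * keep the C get_pixels. */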
void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();
    int bit_depth = avctx->bits_per_raw_sample;

    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int dct_algo = avctx->dct_algo;
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX)) {
            if(mm_flags & AV_CPU_FLAG_SSE2){
                c->fdct = ff_fdct_sse2;
            }else if(mm_flags & AV_CPU_FLAG_MMX2){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }

        if (bit_depth <= 8)
            c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
        c->pix_sum = pix_sum16_mmx;
        c->diff_bytes= diff_bytes_mmx;
        c->sum_abs_dctelem= sum_abs_dctelem_mmx;
#if HAVE_YASM
        c->hadamard8_diff[0]= ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= ff_hadamard8_diff_mmx;
#endif
        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;

        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;

        if (mm_flags & AV_CPU_FLAG_MMX2) {
#if HAVE_YASM
            c->hadamard8_diff[0]= ff_hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= ff_hadamard8_diff_mmx2;
#endif
            c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->vsad[0] = vsad16_mmx2;
            }

            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
        }

        if(mm_flags & AV_CPU_FLAG_SSE2){
            if (bit_depth <= 8)
                c->get_pixels = get_pixels_sse2;
            c->sum_abs_dctelem= sum_abs_dctelem_sse2;
#if HAVE_YASM
            c->sse[0] = ff_sse16_sse2;
#if HAVE_ALIGNED_STACK
            c->hadamard8_diff[0]= ff_hadamard8_diff16_sse2;
            c->hadamard8_diff[1]= ff_hadamard8_diff_sse2;
#endif
#endif
        }

#if HAVE_SSSE3
        if(mm_flags & AV_CPU_FLAG_SSSE3){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_ssse3;
            }
            c->add_8x8basis= add_8x8basis_ssse3;
            c->sum_abs_dctelem= sum_abs_dctelem_ssse3;
#if HAVE_YASM && HAVE_ALIGNED_STACK
            c->hadamard8_diff[0]= ff_hadamard8_diff16_ssse3;
            c->hadamard8_diff[1]= ff_hadamard8_diff_ssse3;
#endif
        }
#endif

        if(mm_flags & AV_CPU_FLAG_3DNOW){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_3dnow;
            }
            c->add_8x8basis= add_8x8basis_3dnow;
        }
    }

    ff_dsputil_init_pix_mmx(c, avctx);
}
  1053. }