;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"

SECTION_RODATA
pb_f:                times 16 db 15
pb_zzzzzzzz77777777: times 8 db -1
pb_7:                times 8 db 7
pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13

section .text align=16

%macro PSWAPD_SSE 2
    pshufw %1, %2, 0x4e
%endmacro

%macro PSWAPD_3DN1 2
    movq      %1, %2
    psrlq     %1, 32
    punpckldq %1, %2
%endmacro
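
; The two PSWAPD macros above emulate the 3DNow! extension instruction
; 'pswapd' (swap the two 32-bit halves of an MMX register) on CPUs that only
; have SSE1 (pshufw) or first-generation 3DNow! (shift + unpack).
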
%macro FLOAT_TO_INT16_INTERLEAVE6 1
; void float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len)
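; Converts 6 planar float channels to interleaved int16, two samples per
; channel (24 output bytes) per iteration.  src points to an array of 6
; channel pointers; channels 1-5 are kept as byte offsets from src[0].
; Conversion uses cvtps2pi (or pf2id), saturation comes from packssdw.
; Rough scalar model (illustrative sketch only; rounding follows the SIMD
; conversion instruction actually used):
;   for (i = 0; i < len; i++)
;       for (c = 0; c < 6; c++)
;           dst[6*i + c] = convert_to_int16(src[c][i]);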
cglobal float_to_int16_interleave6_%1, 2,7,0, dst, src, src1, src2, src3, src4, src5
%ifdef ARCH_X86_64
    %define lend r10d
    mov     lend, r2d
%else
    %define lend dword r2m
%endif
    mov src1q, [srcq+1*gprsize]
    mov src2q, [srcq+2*gprsize]
    mov src3q, [srcq+3*gprsize]
    mov src4q, [srcq+4*gprsize]
    mov src5q, [srcq+5*gprsize]
    mov  srcq, [srcq]
    sub src1q, srcq
    sub src2q, srcq
    sub src3q, srcq
    sub src4q, srcq
    sub src5q, srcq
.loop:
    cvtps2pi  mm0, [srcq]
    cvtps2pi  mm1, [srcq+src1q]
    cvtps2pi  mm2, [srcq+src2q]
    cvtps2pi  mm3, [srcq+src3q]
    cvtps2pi  mm4, [srcq+src4q]
    cvtps2pi  mm5, [srcq+src5q]
    packssdw  mm0, mm3
    packssdw  mm1, mm4
    packssdw  mm2, mm5
    pswapd    mm3, mm0
    punpcklwd mm0, mm1
    punpckhwd mm1, mm2
    punpcklwd mm2, mm3
    pswapd    mm3, mm0
    punpckldq mm0, mm2
    punpckhdq mm2, mm1
    punpckldq mm1, mm3
    movq [dstq   ], mm0
    movq [dstq+16], mm2
    movq [dstq+ 8], mm1
    add srcq, 8
    add dstq, 24
    sub lend, 2
    jg .loop
    emms
    RET
%endmacro ; FLOAT_TO_INT16_INTERLEAVE6
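
; The macro is instantiated three times below: _sse uses cvtps2pi plus the
; pshufw-based pswapd, _3dnow substitutes pf2id and the shift/unpack pswapd
; emulation, and _3dn2 uses pf2id together with the native pswapd.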
%define pswapd PSWAPD_SSE
FLOAT_TO_INT16_INTERLEAVE6 sse
%define cvtps2pi pf2id
%define pswapd PSWAPD_3DN1
FLOAT_TO_INT16_INTERLEAVE6 3dnow
%undef pswapd
FLOAT_TO_INT16_INTERLEAVE6 3dn2
%undef cvtps2pi

%macro SCALARPRODUCT 1
; int scalarproduct_int16(int16_t *v1, int16_t *v2, int order, int shift)
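; Dot product of two int16 vectors, with an arithmetic right shift by 'shift'
; applied during the final horizontal reduction.  Each iteration consumes
; 2*mmsize bytes (mmsize int16 elements), so 'order' is expected to be a
; suitable multiple of that.  Rough scalar model (illustrative only; the asm
; shifts partially reduced sums rather than the final total, so the low bits
; of negative results can differ):
;   int sum = 0;
;   for (i = 0; i < order; i++)
;       sum += v1[i] * v2[i];
;   return sum >> shift;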
cglobal scalarproduct_int16_%1, 3,3,4, v1, v2, order, shift
    shl orderq, 1
    add v1q, orderq
    add v2q, orderq
    neg orderq
    movd m3, shiftm
    pxor m2, m2
.loop:
    movu    m0, [v1q + orderq]
    movu    m1, [v1q + orderq + mmsize]
    pmaddwd m0, [v2q + orderq]
    pmaddwd m1, [v2q + orderq + mmsize]
    paddd   m2, m0
    paddd   m2, m1
    add orderq, mmsize*2
    jl .loop
%if mmsize == 16
    movhlps m0, m2
    paddd   m2, m0
    psrad   m2, m3
    pshuflw m0, m2, 0x4e
%else
    psrad   m2, m3
    pshufw  m0, m2, 0x4e
%endif
    paddd m2, m0
    movd  eax, m2
    RET

; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
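; Fused variant: returns the dot product of the original v1 with v2 while
; simultaneously updating v1[i] += mul * v3[i] using 16-bit wraparound
; arithmetic (pmullw/paddw).  v1 is loaded/stored with aligned accesses;
; v2 and v3 may be unaligned here.  Rough scalar model (illustrative only):
;   int sum = 0;
;   for (i = 0; i < order; i++) {
;       sum   += v1[i] * v2[i];
;       v1[i] += mul * v3[i];     // wraps modulo 2^16
;   }
;   return sum;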
cglobal scalarproduct_and_madd_int16_%1, 4,4,8, v1, v2, v3, order, mul
    shl orderq, 1
    movd m7, mulm
%if mmsize == 16
    pshuflw    m7, m7, 0
    punpcklqdq m7, m7
%else
    pshufw m7, m7, 0
%endif
    pxor m6, m6
    add v1q, orderq
    add v2q, orderq
    add v3q, orderq
    neg orderq
.loop:
    movu    m0, [v2q + orderq]
    movu    m1, [v2q + orderq + mmsize]
    mova    m4, [v1q + orderq]
    mova    m5, [v1q + orderq + mmsize]
    movu    m2, [v3q + orderq]
    movu    m3, [v3q + orderq + mmsize]
    pmaddwd m0, m4
    pmaddwd m1, m5
    pmullw  m2, m7
    pmullw  m3, m7
    paddd   m6, m0
    paddd   m6, m1
    paddw   m2, m4
    paddw   m3, m5
    mova [v1q + orderq], m2
    mova [v1q + orderq + mmsize], m3
    add orderq, mmsize*2
    jl .loop
%if mmsize == 16
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
%else
    pshufw  m0, m6, 0x4e
%endif
    paddd m6, m0
    movd  eax, m6
    RET
%endmacro

INIT_MMX
SCALARPRODUCT mmx2
INIT_XMM
SCALARPRODUCT sse2

%macro SCALARPRODUCT_LOOP 1
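; %1 = byte misalignment (0..14, even) of v2/v3 within a 16-byte line.  For a
; nonzero offset only aligned loads are issued and palignr stitches each pair
; of neighbouring blocks back into the data an unaligned load would have
; returned; the block loaded on the previous (higher-address) iteration is
; kept in m4/m5 and reused as the upper half.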
align 16
.loop%1:
    sub orderq, mmsize*2
%if %1
    mova    m1, m4
    mova    m4, [v2q + orderq]
    mova    m0, [v2q + orderq + mmsize]
    palignr m1, m0, %1
    palignr m0, m4, %1
    mova    m3, m5
    mova    m5, [v3q + orderq]
    mova    m2, [v3q + orderq + mmsize]
    palignr m3, m2, %1
    palignr m2, m5, %1
%else
    mova    m0, [v2q + orderq]
    mova    m1, [v2q + orderq + mmsize]
    mova    m2, [v3q + orderq]
    mova    m3, [v3q + orderq + mmsize]
%endif
    %define t0 [v1q + orderq]
    %define t1 [v1q + orderq + mmsize]
%ifdef ARCH_X86_64
    mova    m8, t0
    mova    m9, t1
    %define t0 m8
    %define t1 m9
%endif
    pmaddwd m0, t0
    pmaddwd m1, t1
    pmullw  m2, m7
    pmullw  m3, m7
    paddw   m2, t0
    paddw   m3, t1
    paddd   m6, m0
    paddd   m6, m1
    mova [v1q + orderq], m2
    mova [v1q + orderq + mmsize], m3
    jg .loop%1
%if %1
    jmp .end
%endif
%endmacro

; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
cglobal scalarproduct_and_madd_int16_ssse3, 4,5,10, v1, v2, v3, order, mul
    shl orderq, 1
    movd       m7, mulm
    pshuflw    m7, m7, 0
    punpcklqdq m7, m7
    pxor       m6, m6
    mov        r4d, v2d
    and        r4d, 15
    and        v2q, ~15
    and        v3q, ~15
    mova       m4, [v2q + orderq]
    mova       m5, [v3q + orderq]
; linear is faster than branch tree or jump table, because the branches taken are cyclic (i.e. predictable)
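; each SCALARPRODUCT_LOOP below is specialized for one of the eight possible
; even byte offsets of v2; offsets 0-12 are dispatched explicitly and offset
; 14 is reached by falling through the compare chain.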
    cmp r4d, 0
    je .loop0
    cmp r4d, 2
    je .loop2
    cmp r4d, 4
    je .loop4
    cmp r4d, 6
    je .loop6
    cmp r4d, 8
    je .loop8
    cmp r4d, 10
    je .loop10
    cmp r4d, 12
    je .loop12
SCALARPRODUCT_LOOP 14
SCALARPRODUCT_LOOP 12
SCALARPRODUCT_LOOP 10
SCALARPRODUCT_LOOP 8
SCALARPRODUCT_LOOP 6
SCALARPRODUCT_LOOP 4
SCALARPRODUCT_LOOP 2
SCALARPRODUCT_LOOP 0
.end:
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
    paddd   m6, m0
    movd    eax, m6
    RET

; void add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
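; HuffYUV/FFV1 median predictor: every output byte is diff[i] plus the median
; of (left, top, left + top - topleft), evaluated serially inside each 8-byte
; group because each pixel depends on the previous result.  Rough scalar
; model (illustrative only; all byte arithmetic wraps modulo 256):
;   l = *left; tl = *left_top;
;   for (i = 0; i < w; i++) {
;       pred = median(l, top[i], l + top[i] - tl);
;       l    = dst[i] = pred + diff[i];
;       tl   = top[i];
;   }
;   *left = dst[w-1]; *left_top = top[w-1];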
cglobal add_hfyu_median_prediction_mmx2, 6,6,0, dst, top, diff, w, left, left_top
    movq  mm0, [topq]
    movq  mm2, mm0
    movd  mm4, [left_topq]
    psllq mm2, 8
    movq  mm1, mm0
    por   mm4, mm2
    movd  mm3, [leftq]
    psubb mm0, mm4 ; t-tl
    add dstq, wq
    add topq, wq
    add diffq, wq
    neg wq
    jmp .skip
.loop:
    movq  mm4, [topq+wq]
    movq  mm0, mm4
    psllq mm4, 8
    por   mm4, mm1
    movq  mm1, mm0 ; t
    psubb mm0, mm4 ; t-tl
.skip:
    movq mm2, [diffq+wq]
%assign i 0
%rep 8
    movq   mm4, mm0
    paddb  mm4, mm3 ; t-tl+l
    movq   mm5, mm3
    pmaxub mm3, mm1
    pminub mm5, mm1
    pminub mm3, mm4
    pmaxub mm3, mm5 ; median
    paddb  mm3, mm2 ; +residual
%if i==0
    movq  mm7, mm3
    psllq mm7, 56
%else
    movq  mm6, mm3
    psrlq mm7, 8
    psllq mm6, 56
    por   mm7, mm6
%endif
%if i<7
    psrlq mm0, 8
    psrlq mm1, 8
    psrlq mm2, 8
%endif
%assign i i+1
%endrep
    movq [dstq+wq], mm7
    add wq, 8
    jl .loop
    movzx r2d, byte [dstq-1]
    mov   [leftq], r2d
    movzx r2d, byte [topq-1]
    mov   [left_topq], r2d
    RET

%macro ADD_HFYU_LEFT_LOOP 1 ; %1 = is_aligned
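; Running left prediction: dst[i] = src[i] + dst[i-1], seeded with the 'left'
; value the caller placed in the top byte of m0.  Each iteration computes a
; byte-wise prefix sum inside the register in log2(mmsize) shift/pshufb+paddb
; steps, then adds the carry broadcast from the previous block (pshufb m0,m5).
; The return value is the last output byte, i.e. the new 'left'.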
    add srcq, wq
    add dstq, wq
    neg wq
%%.loop:
    mova   m1, [srcq+wq]
    mova   m2, m1
    psllw  m1, 8
    paddb  m1, m2
    mova   m2, m1
    pshufb m1, m3
    paddb  m1, m2
    pshufb m0, m5
    mova   m2, m1
    pshufb m1, m4
    paddb  m1, m2
%if mmsize == 16
    mova   m2, m1
    pshufb m1, m6
    paddb  m1, m2
%endif
    paddb  m0, m1
%if %1
    mova   [dstq+wq], m0
%else
    movq   [dstq+wq], m0
    movhps [dstq+wq+8], m0
%endif
    add wq, mmsize
    jl %%.loop
    mov    eax, mmsize-1
    sub    eax, wd
    movd   m1, eax
    pshufb m0, m1
    movd   eax, m0
    RET
%endmacro

; int add_hfyu_left_prediction(uint8_t *dst, const uint8_t *src, int w, int left)
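; Left prediction (dst[i] = src[i] + dst[i-1]) as used by HuffYUV decoding.
; The _ssse3 entry point processes 8 bytes at a time in MMX registers; the
; _sse4 entry point below works on 16-byte XMM blocks and branches back to
; the MMX path (.skip_prologue) whenever src is not 16-byte aligned, with a
; separate unaligned-store loop for misaligned dst.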
INIT_MMX
cglobal add_hfyu_left_prediction_ssse3, 3,3,7, dst, src, w, left
.skip_prologue:
    mova  m5, [pb_7]
    mova  m4, [pb_zzzz3333zzzzbbbb]
    mova  m3, [pb_zz11zz55zz99zzdd]
    movd  m0, leftm
    psllq m0, 56
    ADD_HFYU_LEFT_LOOP 1

INIT_XMM
cglobal add_hfyu_left_prediction_sse4, 3,3,7, dst, src, w, left
    mova   m5, [pb_f]
    mova   m6, [pb_zzzzzzzz77777777]
    mova   m4, [pb_zzzz3333zzzzbbbb]
    mova   m3, [pb_zz11zz55zz99zzdd]
    movd   m0, leftm
    pslldq m0, 15
    test   srcq, 15
    jnz add_hfyu_left_prediction_ssse3.skip_prologue
    test   dstq, 15
    jnz .unaligned
    ADD_HFYU_LEFT_LOOP 1
.unaligned:
    ADD_HFYU_LEFT_LOOP 0

; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
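; Sums v1[i] * v2[i] over 'len' floats, four at a time; both pointers are
; assumed 16-byte aligned and 'len' a multiple of 4 (the third argument is
; declared as 'offset' and turned into a negative byte offset).  On x86-32
; the result is additionally pushed through memory into st(0), since that
; ABI returns floats on the x87 stack.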
cglobal scalarproduct_float_sse, 3,3,2, v1, v2, offset
    neg offsetq
    shl offsetq, 2
    sub v1q, offsetq
    sub v2q, offsetq
    xorps xmm0, xmm0
.loop:
    movaps xmm1, [v1q+offsetq]
    mulps  xmm1, [v2q+offsetq]
    addps  xmm0, xmm1
    add offsetq, 16
    js .loop
    movhlps xmm1, xmm0
    addps   xmm0, xmm1
    movss   xmm1, xmm0
    shufps  xmm0, xmm0, 1
    addss   xmm0, xmm1
%ifndef ARCH_X86_64
    movd r0m, xmm0
    fld dword r0m
%endif
    RET