;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"

SECTION_RODATA
pb_f:                times 16 db 15
pb_zzzzzzzz77777777: times 8 db -1
pb_7:                times 8 db 7
pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13

section .text align=16
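
; PSWAPD swaps the two 32-bit halves of a 64-bit MMX register. SSE CPUs can
; emulate it with pshufw; plain 3DNow! CPUs need a shift+unpack sequence;
; 3DNow!ext CPUs have a native pswapd instruction, which is why the 3dn2
; build below leaves the name undefined.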
%macro PSWAPD_SSE 2
    pshufw    %1, %2, 0x4e
%endmacro
%macro PSWAPD_3DN1 2
    movq      %1, %2
    psrlq     %1, 32
    punpckldq %1, %2
%endmacro

%macro FLOAT_TO_INT16_INTERLEAVE6 1
; void ff_float_to_int16_interleave6_%1(int16_t *dst, const float **src, int len)
cglobal float_to_int16_interleave6_%1, 2,7,0, dst, src, src1, src2, src3, src4, src5
%ifdef ARCH_X86_64
    %define lend r10d
    mov     lend, r2d
%else
    %define lend dword r2m
%endif
    mov     src1q, [srcq+1*gprsize]
    mov     src2q, [srcq+2*gprsize]
    mov     src3q, [srcq+3*gprsize]
    mov     src4q, [srcq+4*gprsize]
    mov     src5q, [srcq+5*gprsize]
    mov     srcq,  [srcq]
    sub     src1q, srcq
    sub     src2q, srcq
    sub     src3q, srcq
    sub     src4q, srcq
    sub     src5q, srcq
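; the five extra source pointers are stored as offsets from srcq, so the
; loop below advances all six streams by incrementing a single register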
.loop:
    cvtps2pi mm0, [srcq]
    cvtps2pi mm1, [srcq+src1q]
    cvtps2pi mm2, [srcq+src2q]
    cvtps2pi mm3, [srcq+src3q]
    cvtps2pi mm4, [srcq+src4q]
    cvtps2pi mm5, [srcq+src5q]
    packssdw mm0, mm3
    packssdw mm1, mm4
    packssdw mm2, mm5
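; mm0 = [a0 a1 d0 d1], mm1 = [b0 b1 e0 e1], mm2 = [c0 c1 f0 f1], where the
; letter is the channel and the digit the sample; the swaps and unpacks
; below reorder this into a0 b0 c0 d0 e0 f0 a1 b1 c1 d1 e1 f1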
    pswapd    mm3, mm0
    punpcklwd mm0, mm1
    punpckhwd mm1, mm2
    punpcklwd mm2, mm3
    pswapd    mm3, mm0
    punpckldq mm0, mm2
    punpckhdq mm2, mm1
    punpckldq mm1, mm3
    movq [dstq   ], mm0
    movq [dstq+16], mm2
    movq [dstq+ 8], mm1
    add     srcq, 8
    add     dstq, 24
    sub     lend, 2
    jg .loop
    emms
    RET
%endmacro ; FLOAT_TO_INT16_INTERLEAVE6
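
; three instantiations: sse uses cvtps2pi and the pshufw-based swap, 3dnow
; replaces the float->int conversion with pf2id, and 3dn2 additionally uses
; the native pswapd instruction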
%define pswapd PSWAPD_SSE
FLOAT_TO_INT16_INTERLEAVE6 sse
%define cvtps2pi pf2id
%define pswapd PSWAPD_3DN1
FLOAT_TO_INT16_INTERLEAVE6 3dnow
%undef pswapd
FLOAT_TO_INT16_INTERLEAVE6 3dn2
%undef cvtps2pi
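
; SCALARPRODUCT generates add_int16, sub_int16 and scalarproduct_int16 for
; one register width; it is instantiated below under INIT_MMX (mmx2) and
; INIT_XMM (sse2), with mmsize resolving to 8 or 16 bytes accordingly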
%macro SCALARPRODUCT 1
; void add_int16(int16_t * v1, int16_t * v2, int order)
cglobal add_int16_%1, 3,3,2, v1, v2, order
    shl     orderq, 1
    add     v1q, orderq
    add     v2q, orderq
    neg     orderq
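; pointing v1/v2 at the end of the arrays and counting a negative byte
; offset up towards zero lets a single add+jl pair both advance and
; terminate every loop in this macro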
.loop:
    movu    m0, [v2q + orderq]
    movu    m1, [v2q + orderq + mmsize]
    paddw   m0, [v1q + orderq]
    paddw   m1, [v1q + orderq + mmsize]
    mova    [v1q + orderq], m0
    mova    [v1q + orderq + mmsize], m1
    add     orderq, mmsize*2
    jl .loop
    REP_RET

; void sub_int16(int16_t * v1, int16_t * v2, int order)
cglobal sub_int16_%1, 3,3,4, v1, v2, order
    shl     orderq, 1
    add     v1q, orderq
    add     v2q, orderq
    neg     orderq
.loop:
    movu    m2, [v2q + orderq]
    movu    m3, [v2q + orderq + mmsize]
    mova    m0, [v1q + orderq]
    mova    m1, [v1q + orderq + mmsize]
    psubw   m0, m2
    psubw   m1, m3
    mova    [v1q + orderq], m0
    mova    [v1q + orderq + mmsize], m1
    add     orderq, mmsize*2
    jl .loop
    REP_RET

; int scalarproduct_int16_%1(int16_t * v1, int16_t * v2, int order, int shift)
cglobal scalarproduct_int16_%1, 3,3,4, v1, v2, order, shift
    shl     orderq, 1
    add     v1q, orderq
    add     v2q, orderq
    neg     orderq
    movd    m3, shiftm
    pxor    m2, m2
.loop:
    movu    m0, [v1q + orderq]
    movu    m1, [v1q + orderq + mmsize]
    pmaddwd m0, [v2q + orderq]
    pmaddwd m1, [v2q + orderq + mmsize]
    paddd   m2, m0
    paddd   m2, m1
    add     orderq, mmsize*2
    jl .loop
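; horizontal reduction: fold the accumulator halves together, apply the
; arithmetic shift, add the remaining pair and return the low dword in eax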
%if mmsize == 16
    movhlps m0, m2
    paddd   m2, m0
    psrad   m2, m3
    pshuflw m0, m2, 0x4e
%else
    psrad   m2, m3
    pshufw  m0, m2, 0x4e
%endif
    paddd   m2, m0
    movd    eax, m2
    RET
%endmacro

INIT_MMX
SCALARPRODUCT mmx2
INIT_XMM
SCALARPRODUCT sse2

; void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
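; reconstructs huffyuv's median prediction: each pixel is predicted as
; median(l, t, l+t-tl) and the residual from *diff is added; the serial
; dependency on l (the previous output pixel) forces byte-at-a-time work
; inside the %rep block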
cglobal add_hfyu_median_prediction_mmx2, 6,6,0, dst, top, diff, w, left, left_top
    movq    mm0, [topq]
    movq    mm2, mm0
    movd    mm4, [left_topq]
    psllq   mm2, 8
    movq    mm1, mm0
    por     mm4, mm2
    movd    mm3, [leftq]
    psubb   mm0, mm4 ; t-tl
    add     dstq, wq
    add     topq, wq
    add     diffq, wq
    neg     wq
    jmp .skip
.loop:
    movq    mm4, [topq+wq]
    movq    mm0, mm4
    psllq   mm4, 8
    por     mm4, mm1
    movq    mm1, mm0 ; t
    psubb   mm0, mm4 ; t-tl
.skip:
    movq    mm2, [diffq+wq]
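; per-iteration register roles: mm1 = t, mm0 = t-tl, mm2 = residuals,
; mm3 = l (carried over from the previous byte); each %rep step computes
; one output byte and shifts the inputs right by 8 bits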
%assign i 0
%rep 8
    movq    mm4, mm0
    paddb   mm4, mm3 ; t-tl+l
    movq    mm5, mm3
    pmaxub  mm3, mm1
    pminub  mm5, mm1
    pminub  mm3, mm4
    pmaxub  mm3, mm5 ; median
    paddb   mm3, mm2 ; +residual
%if i==0
    movq    mm7, mm3
    psllq   mm7, 56
%else
    movq    mm6, mm3
    psrlq   mm7, 8
    psllq   mm6, 56
    por     mm7, mm6
%endif
%if i<7
    psrlq   mm0, 8
    psrlq   mm1, 8
    psrlq   mm2, 8
%endif
%assign i i+1
%endrep
    movq    [dstq+wq], mm7
    add     wq, 8
    jl .loop
    movzx   r2d, byte [dstq-1]
    mov     [leftq], r2d
    movzx   r2d, byte [topq-1]
    mov     [left_topq], r2d
    RET
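
; ADD_HFYU_LEFT_LOOP computes a byte-wise prefix sum over each block: the
; psllw+paddb step sums neighbours at distance 1, then each pshufb+paddb
; pass doubles the summed run (pb_zz11zz55..., pb_zzzz3333..., and for xmm
; pb_zzzzzzzz7777...); m0 broadcasts the last byte of the previous block as
; the carried 'left' value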
%macro ADD_HFYU_LEFT_LOOP 1 ; %1 = is_aligned
    add     srcq, wq
    add     dstq, wq
    neg     wq
%%.loop:
    mova    m1, [srcq+wq]
    mova    m2, m1
    psllw   m1, 8
    paddb   m1, m2
    mova    m2, m1
    pshufb  m1, m3
    paddb   m1, m2
    pshufb  m0, m5
    mova    m2, m1
    pshufb  m1, m4
    paddb   m1, m2
%if mmsize == 16
    mova    m2, m1
    pshufb  m1, m6
    paddb   m1, m2
%endif
    paddb   m0, m1
%if %1
    mova    [dstq+wq], m0
%else
    movq    [dstq+wq], m0
    movhps  [dstq+wq+8], m0
%endif
    add     wq, mmsize
    jl %%.loop
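; on exit w is in [0, mmsize), so byte mmsize-1-w of m0 holds the last
; output pixel; broadcast it with pshufb and return it in eax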
    mov     eax, mmsize-1
    sub     eax, wd
    movd    m1, eax
    pshufb  m0, m1
    movd    eax, m0
    RET
%endmacro

; int ff_add_hfyu_left_prediction(uint8_t *dst, const uint8_t *src, int w, int left)
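; the ssse3 version works on 8-byte MMX registers; the sse4 version needs a
; 16-byte-aligned src (otherwise it jumps into the ssse3 body) and falls
; back to unaligned stores when only dst is misaligned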
INIT_MMX
cglobal add_hfyu_left_prediction_ssse3, 3,3,7, dst, src, w, left
.skip_prologue:
    mova    m5, [pb_7 GLOBAL]
    mova    m4, [pb_zzzz3333zzzzbbbb GLOBAL]
    mova    m3, [pb_zz11zz55zz99zzdd GLOBAL]
    movd    m0, leftm
    psllq   m0, 56
    ADD_HFYU_LEFT_LOOP 1

INIT_XMM
cglobal add_hfyu_left_prediction_sse4, 3,3,7, dst, src, w, left
    mova    m5, [pb_f GLOBAL]
    mova    m6, [pb_zzzzzzzz77777777 GLOBAL]
    mova    m4, [pb_zzzz3333zzzzbbbb GLOBAL]
    mova    m3, [pb_zz11zz55zz99zzdd GLOBAL]
    movd    m0, leftm
    pslldq  m0, 15
    test    srcq, 15
    jnz add_hfyu_left_prediction_ssse3.skip_prologue
    test    dstq, 15
    jnz .unaligned
    ADD_HFYU_LEFT_LOOP 1
.unaligned:
    ADD_HFYU_LEFT_LOOP 0