;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA
pb_f:                times 16 db 15
pb_zzzzzzzz77777777: times 8 db -1
pb_7:                times 8 db 7
pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
pb_revwords:         SHUFFLE_MASK_W 7, 6, 5, 4, 3, 2, 1, 0
pd_16384:            times 4 dd 16384
pb_bswap32:          db 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

SECTION_TEXT

%macro SCALARPRODUCT 0
; int ff_scalarproduct_int16(int16_t *v1, int16_t *v2, int order)
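;
; Rough C model of what this computes (a reading aid only, with an invented
; name, not the Libav C reference itself):
;
;     static int scalarproduct_int16_sketch(const int16_t *v1,
;                                           const int16_t *v2, int order)
;     {
;         int sum = 0, i;
;         for (i = 0; i < order; i++)
;             sum += v1[i] * v2[i];   /* products summed in 32 bits, as pmaddwd does */
;         return sum;
;     }
;
; The asm walks both vectors with a negative byte offset so that a single
; add/jl pair doubles as the loop counter.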
cglobal scalarproduct_int16, 3,3,3, v1, v2, order
    shl     orderq, 1
    add     v1q, orderq
    add     v2q, orderq
    neg     orderq
    pxor    m2, m2
.loop:
    movu    m0, [v1q + orderq]
    movu    m1, [v1q + orderq + mmsize]
    pmaddwd m0, [v2q + orderq]
    pmaddwd m1, [v2q + orderq + mmsize]
    paddd   m2, m0
    paddd   m2, m1
    add     orderq, mmsize*2
    jl .loop
%if mmsize == 16
    movhlps m0, m2
    paddd   m2, m0
    pshuflw m0, m2, 0x4e
%else
    pshufw  m0, m2, 0x4e
%endif
    paddd   m2, m0
    movd    eax, m2
    RET

; int ff_scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3,
;                                     int order, int mul)
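;
; Sketch of the intended semantics (hedged; invented name, the real C
; reference lives elsewhere in Libav):
;
;     static int scalarproduct_and_madd_sketch(int16_t *v1, const int16_t *v2,
;                                              const int16_t *v3, int order, int mul)
;     {
;         int sum = 0, i;
;         for (i = 0; i < order; i++) {
;             sum   += v1[i] * v2[i];   /* dot product over the old v1 values */
;             v1[i] += mul * v3[i];     /* in-place madd, wrapping at 16 bits */
;         }
;         return sum;
;     }
;
; The asm loads v1 into m4/m5 first, so the dot product uses the values from
; before the in-place update, matching the order of operations above.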
cglobal scalarproduct_and_madd_int16, 4,4,8, v1, v2, v3, order, mul
    shl     orderq, 1
    movd    m7, mulm
%if mmsize == 16
    pshuflw m7, m7, 0
    punpcklqdq m7, m7
%else
    pshufw  m7, m7, 0
%endif
    pxor    m6, m6
    add     v1q, orderq
    add     v2q, orderq
    add     v3q, orderq
    neg     orderq
.loop:
    movu    m0, [v2q + orderq]
    movu    m1, [v2q + orderq + mmsize]
    mova    m4, [v1q + orderq]
    mova    m5, [v1q + orderq + mmsize]
    movu    m2, [v3q + orderq]
    movu    m3, [v3q + orderq + mmsize]
    pmaddwd m0, m4
    pmaddwd m1, m5
    pmullw  m2, m7
    pmullw  m3, m7
    paddd   m6, m0
    paddd   m6, m1
    paddw   m2, m4
    paddw   m3, m5
    mova    [v1q + orderq], m2
    mova    [v1q + orderq + mmsize], m3
    add     orderq, mmsize*2
    jl .loop
%if mmsize == 16
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
%else
    pshufw  m0, m6, 0x4e
%endif
    paddd   m6, m0
    movd    eax, m6
    RET
%endmacro

INIT_MMX mmxext
SCALARPRODUCT
INIT_XMM sse2
SCALARPRODUCT

%macro SCALARPRODUCT_LOOP 1
align 16
.loop%1:
    sub     orderq, mmsize*2
%if %1
    mova    m1, m4
    mova    m4, [v2q + orderq]
    mova    m0, [v2q + orderq + mmsize]
    palignr m1, m0, %1
    palignr m0, m4, %1
    mova    m3, m5
    mova    m5, [v3q + orderq]
    mova    m2, [v3q + orderq + mmsize]
    palignr m3, m2, %1
    palignr m2, m5, %1
%else
    mova    m0, [v2q + orderq]
    mova    m1, [v2q + orderq + mmsize]
    mova    m2, [v3q + orderq]
    mova    m3, [v3q + orderq + mmsize]
%endif
%define t0 [v1q + orderq]
%define t1 [v1q + orderq + mmsize]
%if ARCH_X86_64
    mova    m8, t0
    mova    m9, t1
%define t0 m8
%define t1 m9
%endif
    pmaddwd m0, t0
    pmaddwd m1, t1
    pmullw  m2, m7
    pmullw  m3, m7
    paddw   m2, t0
    paddw   m3, t1
    paddd   m6, m0
    paddd   m6, m1
    mova    [v1q + orderq], m2
    mova    [v1q + orderq + mmsize], m3
    jg .loop%1
%if %1
    jmp .end
%endif
%endmacro

; int ff_scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3,
;                                     int order, int mul)
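;
; Reading aid for the variant below (an interpretation of the code, not a
; documented contract): v1 is accessed with aligned loads, so it must be
; 16-byte aligned, while v2/v3 are rounded down to an aligned address and are
; expected to share the same misalignment, kept in r4d. That residual offset
; selects one of the SCALARPRODUCT_LOOP instances above, which stitch each
; logically unaligned vector out of two aligned loads:
;
;     ; hi = [p + 16], lo = [p]             (both 16-byte aligned loads)
;     ; palignr hi, lo, k  -> the 16 bytes that start at p + k, left in hi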
INIT_XMM ssse3
cglobal scalarproduct_and_madd_int16, 4,5,10, v1, v2, v3, order, mul
    shl     orderq, 1
    movd    m7, mulm
    pshuflw m7, m7, 0
    punpcklqdq m7, m7
    pxor    m6, m6
    mov     r4d, v2d
    and     r4d, 15
    and     v2q, ~15
    and     v3q, ~15
    mova    m4, [v2q + orderq]
    mova    m5, [v3q + orderq]
    ; linear is faster than branch tree or jump table, because the branches taken are cyclic (i.e. predictable)
    cmp     r4d, 0
    je .loop0
    cmp     r4d, 2
    je .loop2
    cmp     r4d, 4
    je .loop4
    cmp     r4d, 6
    je .loop6
    cmp     r4d, 8
    je .loop8
    cmp     r4d, 10
    je .loop10
    cmp     r4d, 12
    je .loop12
SCALARPRODUCT_LOOP 14
SCALARPRODUCT_LOOP 12
SCALARPRODUCT_LOOP 10
SCALARPRODUCT_LOOP 8
SCALARPRODUCT_LOOP 6
SCALARPRODUCT_LOOP 4
SCALARPRODUCT_LOOP 2
SCALARPRODUCT_LOOP 0
.end:
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
    paddd   m6, m0
    movd    eax, m6
    RET

;-----------------------------------------------------------------------------
; void ff_apply_window_int16(int16_t *output, const int16_t *input,
;                            const int16_t *window, unsigned int len)
;-----------------------------------------------------------------------------
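; Rough C model of the windowing below, assuming (as the loop structure
; suggests) a symmetric half-length window; invented name, reading aid only:
;
;     static void apply_window_int16_sketch(int16_t *output, const int16_t *input,
;                                           const int16_t *window, unsigned int len)
;     {
;         unsigned int i, len2 = len >> 1;
;         for (i = 0; i < len2; i++) {
;             int16_t w       = window[i];
;             output[i]       = (input[i]       * w + (1 << 14)) >> 15;
;             output[len-1-i] = (input[len-1-i] * w + (1 << 14)) >> 15;
;         }
;     }
;
; The non-bitexact mmxext/sse2 path truncates (x*w) >> 15 instead of rounding,
; as the MUL16FIXED comments below note.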
%macro REVERSE_WORDS 1-2
%if cpuflag(ssse3) && notcpuflag(atom)
    pshufb  %1, %2
%elif cpuflag(sse2)
    pshuflw %1, %1, 0x1B
    pshufhw %1, %1, 0x1B
    pshufd  %1, %1, 0x4E
%elif cpuflag(mmxext)
    pshufw  %1, %1, 0x1B
%endif
%endmacro

%macro MUL16FIXED 3
%if cpuflag(ssse3) ; dst, src, unused
; dst = ((dst * src) + (1<<14)) >> 15
    pmulhrsw %1, %2
%elif cpuflag(mmxext) ; dst, src, temp
; dst = (dst * src) >> 15
; pmulhw cuts off the bottom bit, so we have to lshift by 1 and add it back
; in from the pmullw result.
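; In other words, with p = dst*src as the full 32-bit product:
;     pmulhw gives p >> 16, pmullw gives p & 0xffff, and
;     (p >> 15) == ((p >> 16) << 1) | ((p & 0xffff) >> 15)
; which is exactly what the shift/or sequence below reconstructs.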
    mova    %3, %1
    pmulhw  %1, %2
    pmullw  %3, %2
    psrlw   %3, 15
    psllw   %1, 1
    por     %1, %3
%endif
%endmacro

%macro APPLY_WINDOW_INT16 1 ; %1 bitexact version
%if %1
cglobal apply_window_int16, 4,5,6, output, input, window, offset, offset2
%else
cglobal apply_window_int16_round, 4,5,6, output, input, window, offset, offset2
%endif
    lea     offset2q, [offsetq-mmsize]
%if cpuflag(ssse3) && notcpuflag(atom)
    mova    m5, [pb_revwords]
    ALIGN 16
%elif %1
    mova    m5, [pd_16384]
%endif
.loop:
%if cpuflag(ssse3)
    ; This version does the 16x16->16 multiplication in-place without expanding
    ; to 32-bit. The ssse3 version is bit-identical.
    mova    m0, [windowq+offset2q]
    mova    m1, [ inputq+offset2q]
    pmulhrsw m1, m0
    REVERSE_WORDS m0, m5
    pmulhrsw m0, [ inputq+offsetq ]
    mova    [outputq+offset2q], m1
    mova    [outputq+offsetq ], m0
%elif %1
    ; This version expands 16-bit to 32-bit, multiplies by the window,
    ; adds 16384 for rounding, right shifts 15, then repacks back to words to
    ; save to the output. The window is reversed for the second half.
    mova    m3, [windowq+offset2q]
    mova    m4, [ inputq+offset2q]
    pxor    m0, m0
    punpcklwd m0, m3
    punpcklwd m1, m4
    pmaddwd m0, m1
    paddd   m0, m5
    psrad   m0, 15
    pxor    m2, m2
    punpckhwd m2, m3
    punpckhwd m1, m4
    pmaddwd m2, m1
    paddd   m2, m5
    psrad   m2, 15
    packssdw m0, m2
    mova    [outputq+offset2q], m0
    REVERSE_WORDS m3
    mova    m4, [ inputq+offsetq]
    pxor    m0, m0
    punpcklwd m0, m3
    punpcklwd m1, m4
    pmaddwd m0, m1
    paddd   m0, m5
    psrad   m0, 15
    pxor    m2, m2
    punpckhwd m2, m3
    punpckhwd m1, m4
    pmaddwd m2, m1
    paddd   m2, m5
    psrad   m2, 15
    packssdw m0, m2
    mova    [outputq+offsetq], m0
%else
    ; This version does the 16x16->16 multiplication in-place without expanding
    ; to 32-bit. The mmxext and sse2 versions do not use rounding, and
    ; therefore are not bit-identical to the C version.
    mova    m0, [windowq+offset2q]
    mova    m1, [ inputq+offset2q]
    mova    m2, [ inputq+offsetq ]
    MUL16FIXED m1, m0, m3
    REVERSE_WORDS m0
    MUL16FIXED m2, m0, m3
    mova    [outputq+offset2q], m1
    mova    [outputq+offsetq ], m2
%endif
    add     offsetd, mmsize
    sub     offset2d, mmsize
    jae .loop
    REP_RET
%endmacro

INIT_MMX mmxext
APPLY_WINDOW_INT16 0
INIT_XMM sse2
APPLY_WINDOW_INT16 0
INIT_MMX mmxext
APPLY_WINDOW_INT16 1
INIT_XMM sse2
APPLY_WINDOW_INT16 1
INIT_XMM ssse3
APPLY_WINDOW_INT16 1
INIT_XMM ssse3, atom
APPLY_WINDOW_INT16 1

; void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
;                                           const uint8_t *diff, int w,
;                                           int *left, int *left_top)
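;
; Rough C model of the median prediction (invented names, reading aid only;
; left/left_top carry the running state across calls, as the stores at the
; end of the asm suggest):
;
;     static int mid3(int a, int b, int c)   /* median of three */
;     {
;         if (a > b) { int t = a; a = b; b = t; }
;         return a > c ? a : (b < c ? b : c);
;     }
;
;     static void add_hfyu_median_pred_sketch(uint8_t *dst, const uint8_t *top,
;                                             const uint8_t *diff, int w,
;                                             int *left, int *left_top)
;     {
;         int i, l = *left, lt = *left_top;
;         for (i = 0; i < w; i++) {
;             l      = (mid3(l, top[i], (l + top[i] - lt) & 0xFF) + diff[i]) & 0xFF;
;             lt     = top[i];
;             dst[i] = l;
;         }
;         *left = l; *left_top = lt;
;     }
;
; The asm computes the same median with pmaxub/pminub on 8 pixels at a time
; and resolves the serial dependency on the left neighbour inside the %rep
; block, one byte lane per iteration.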
INIT_MMX mmxext
cglobal add_hfyu_median_prediction, 6,6,0, dst, top, diff, w, left, left_top
    movq    mm0, [topq]
    movq    mm2, mm0
    movd    mm4, [left_topq]
    psllq   mm2, 8
    movq    mm1, mm0
    por     mm4, mm2
    movd    mm3, [leftq]
    psubb   mm0, mm4 ; t-tl
    add     dstq, wq
    add     topq, wq
    add     diffq, wq
    neg     wq
    jmp .skip
.loop:
    movq    mm4, [topq+wq]
    movq    mm0, mm4
    psllq   mm4, 8
    por     mm4, mm1
    movq    mm1, mm0 ; t
    psubb   mm0, mm4 ; t-tl
.skip:
    movq    mm2, [diffq+wq]
%assign i 0
%rep 8
    movq    mm4, mm0
    paddb   mm4, mm3 ; t-tl+l
    movq    mm5, mm3
    pmaxub  mm3, mm1
    pminub  mm5, mm1
    pminub  mm3, mm4
    pmaxub  mm3, mm5 ; median
    paddb   mm3, mm2 ; +residual
%if i==0
    movq    mm7, mm3
    psllq   mm7, 56
%else
    movq    mm6, mm3
    psrlq   mm7, 8
    psllq   mm6, 56
    por     mm7, mm6
%endif
%if i<7
    psrlq   mm0, 8
    psrlq   mm1, 8
    psrlq   mm2, 8
%endif
%assign i i+1
%endrep
    movq    [dstq+wq], mm7
    add     wq, 8
    jl .loop
    movzx   r2d, byte [dstq-1]
    mov     [leftq], r2d
    movzx   r2d, byte [topq-1]
    mov     [left_topq], r2d
    RET

%macro ADD_HFYU_LEFT_LOOP 2 ; %1 = dst_is_aligned, %2 = src_is_aligned
    add     srcq, wq
    add     dstq, wq
    neg     wq
%%.loop:
%if %2
    mova    m1, [srcq+wq]
%else
    movu    m1, [srcq+wq]
%endif
    mova    m2, m1
    psllw   m1, 8
    paddb   m1, m2
    mova    m2, m1
    pshufb  m1, m3
    paddb   m1, m2
    pshufb  m0, m5
    mova    m2, m1
    pshufb  m1, m4
    paddb   m1, m2
%if mmsize == 16
    mova    m2, m1
    pshufb  m1, m6
    paddb   m1, m2
%endif
    paddb   m0, m1
%if %1
    mova    [dstq+wq], m0
%else
    movq    [dstq+wq], m0
    movhps  [dstq+wq+8], m0
%endif
    add     wq, mmsize
    jl %%.loop
    mov     eax, mmsize-1
    sub     eax, wd
    movd    m1, eax
    pshufb  m0, m1
    movd    eax, m0
    RET
%endmacro

; int ff_add_hfyu_left_prediction(uint8_t *dst, const uint8_t *src,
;                                 int w, int left)
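;
; Rough C model (invented name, reading aid only). The running left value of
; the last element is returned; the trailing pshufb/movd in the loop macro
; extracts it from the vector accumulator:
;
;     static int add_hfyu_left_pred_sketch(uint8_t *dst, const uint8_t *src,
;                                          int w, int acc)
;     {
;         int i;
;         for (i = 0; i < w; i++) {
;             acc    = (acc + src[i]) & 0xFF;
;             dst[i] = acc;
;         }
;         return acc;
;     }
;
; The SIMD versions below turn this into a byte-wise prefix sum: log2(mmsize)
; shift/shuffle-and-add steps inside each register, with the previous
; register's last byte carried over through m0.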
INIT_MMX ssse3
cglobal add_hfyu_left_prediction, 3,3,7, dst, src, w, left
.skip_prologue:
    mova    m5, [pb_7]
    mova    m4, [pb_zzzz3333zzzzbbbb]
    mova    m3, [pb_zz11zz55zz99zzdd]
    movd    m0, leftm
    psllq   m0, 56
    ADD_HFYU_LEFT_LOOP 1, 1

INIT_XMM sse4
cglobal add_hfyu_left_prediction, 3,3,7, dst, src, w, left
    mova    m5, [pb_f]
    mova    m6, [pb_zzzzzzzz77777777]
    mova    m4, [pb_zzzz3333zzzzbbbb]
    mova    m3, [pb_zz11zz55zz99zzdd]
    movd    m0, leftm
    pslldq  m0, 15
    test    srcq, 15
    jnz .src_unaligned
    test    dstq, 15
    jnz .dst_unaligned
    ADD_HFYU_LEFT_LOOP 1, 1
.dst_unaligned:
    ADD_HFYU_LEFT_LOOP 0, 1
.src_unaligned:
    ADD_HFYU_LEFT_LOOP 0, 0

;-----------------------------------------------------------------------------
; void ff_vector_clip_int32(int32_t *dst, const int32_t *src, int32_t min,
;                           int32_t max, unsigned int len)
;-----------------------------------------------------------------------------
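; Per-element semantics, for reference (a sketch with an invented name):
;
;     static void vector_clip_int32_sketch(int32_t *dst, const int32_t *src,
;                                          int32_t min, int32_t max,
;                                          unsigned int len)
;     {
;         unsigned int i;
;         for (i = 0; i < len; i++) {
;             int32_t v = src[i];
;             dst[i] = v < min ? min : (v > max ? max : v);
;         }
;     }
;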
; %1 = number of xmm registers used
; %2 = number of inline load/process/store loops per asm loop
; %3 = process 4*mmsize (%3=0) or 8*mmsize (%3=1) bytes per loop
; %4 = CLIPD function takes min/max as float instead of int (CLIPD_SSE2)
; %5 = suffix
%macro VECTOR_CLIP_INT32 4-5
cglobal vector_clip_int32%5, 5,5,%1, dst, src, min, max, len
%if %4
    cvtsi2ss m4, minm
    cvtsi2ss m5, maxm
%else
    movd    m4, minm
    movd    m5, maxm
%endif
    SPLATD  m4
    SPLATD  m5
.loop:
%assign %%i 1
%rep %2
    mova    m0, [srcq+mmsize*0*%%i]
    mova    m1, [srcq+mmsize*1*%%i]
    mova    m2, [srcq+mmsize*2*%%i]
    mova    m3, [srcq+mmsize*3*%%i]
%if %3
    mova    m7, [srcq+mmsize*4*%%i]
    mova    m8, [srcq+mmsize*5*%%i]
    mova    m9, [srcq+mmsize*6*%%i]
    mova    m10, [srcq+mmsize*7*%%i]
%endif
    CLIPD   m0, m4, m5, m6
    CLIPD   m1, m4, m5, m6
    CLIPD   m2, m4, m5, m6
    CLIPD   m3, m4, m5, m6
%if %3
    CLIPD   m7, m4, m5, m6
    CLIPD   m8, m4, m5, m6
    CLIPD   m9, m4, m5, m6
    CLIPD   m10, m4, m5, m6
%endif
    mova    [dstq+mmsize*0*%%i], m0
    mova    [dstq+mmsize*1*%%i], m1
    mova    [dstq+mmsize*2*%%i], m2
    mova    [dstq+mmsize*3*%%i], m3
%if %3
    mova    [dstq+mmsize*4*%%i], m7
    mova    [dstq+mmsize*5*%%i], m8
    mova    [dstq+mmsize*6*%%i], m9
    mova    [dstq+mmsize*7*%%i], m10
%endif
%assign %%i %%i+1
%endrep
    add     srcq, mmsize*4*(%2+%3)
    add     dstq, mmsize*4*(%2+%3)
    sub     lend, mmsize*(%2+%3)
    jg .loop
    REP_RET
%endmacro

INIT_MMX mmx
%define CLIPD CLIPD_MMX
VECTOR_CLIP_INT32 0, 1, 0, 0
INIT_XMM sse2
VECTOR_CLIP_INT32 6, 1, 0, 0, _int
%define CLIPD CLIPD_SSE2
VECTOR_CLIP_INT32 6, 2, 0, 1
INIT_XMM sse4
%define CLIPD CLIPD_SSE41
%ifdef m8
VECTOR_CLIP_INT32 11, 1, 1, 0
%else
VECTOR_CLIP_INT32 6, 1, 0, 0
%endif

; %1 = aligned/unaligned
%macro BSWAP_LOOPS 1
    mov     r3, r2
    sar     r2, 3
    jz .left4_%1
.loop8_%1:
    mov%1   m0, [r1 +  0]
    mov%1   m1, [r1 + 16]
%if cpuflag(ssse3)
    pshufb  m0, m2
    pshufb  m1, m2
    mov%1   [r0 +  0], m0
    mov%1   [r0 + 16], m1
%else
    pshuflw m0, m0, 10110001b
    pshuflw m1, m1, 10110001b
    pshufhw m0, m0, 10110001b
    pshufhw m1, m1, 10110001b
    mova    m2, m0
    mova    m3, m1
    psllw   m0, 8
    psllw   m1, 8
    psrlw   m2, 8
    psrlw   m3, 8
    por     m2, m0
    por     m3, m1
    mov%1   [r0 +  0], m2
    mov%1   [r0 + 16], m3
%endif
    add     r0, 32
    add     r1, 32
    dec     r2
    jnz .loop8_%1
.left4_%1:
    mov     r2, r3
    and     r3, 4
    jz .left
    mov%1   m0, [r1]
%if cpuflag(ssse3)
    pshufb  m0, m2
    mov%1   [r0], m0
%else
    pshuflw m0, m0, 10110001b
    pshufhw m0, m0, 10110001b
    mova    m2, m0
    psllw   m0, 8
    psrlw   m2, 8
    por     m2, m0
    mov%1   [r0], m2
%endif
    add     r1, 16
    add     r0, 16
%endmacro

; void ff_bswap_buf(uint32_t *dst, const uint32_t *src, int w);
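;
; Per-element semantics, for reference (a sketch, not the C fallback itself):
;
;     static void bswap_buf_sketch(uint32_t *dst, const uint32_t *src, int w)
;     {
;         int i;
;         for (i = 0; i < w; i++)
;             dst[i] = (src[i] >> 24) | ((src[i] >> 8) & 0x0000ff00) |
;                      ((src[i] << 8) & 0x00ff0000) | (src[i] << 24);
;     }
;
; The sse2 path emulates the byte swap with word shuffles plus 8-bit shifts;
; the ssse3 path is a single pshufb with the pb_bswap32 mask.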
%macro BSWAP32_BUF 0
%if cpuflag(ssse3)
cglobal bswap32_buf, 3,4,3
    mov     r3, r1
    mova    m2, [pb_bswap32]
%else
cglobal bswap32_buf, 3,4,5
    mov     r3, r1
%endif
    and     r3, 15
    jz .start_align
    BSWAP_LOOPS u
    jmp .left
.start_align:
    BSWAP_LOOPS a
.left:
%if cpuflag(ssse3)
    mov     r3, r2
    and     r2, 2
    jz .left1
    movq    m0, [r1]
    pshufb  m0, m2
    movq    [r0], m0
    add     r1, 8
    add     r0, 8
.left1:
    and     r3, 1
    jz .end
    mov     r2d, [r1]
    bswap   r2d
    mov     [r0], r2d
%else
    and     r2, 3
    jz .end
.loop2:
    mov     r3d, [r1]
    bswap   r3d
    mov     [r0], r3d
    add     r1, 4
    add     r0, 4
    dec     r2
    jnz .loop2
%endif
.end:
    RET
%endmacro

INIT_XMM sse2
BSWAP32_BUF
INIT_XMM ssse3
BSWAP32_BUF