  1. ;******************************************************************************
  2. ;* VP8 MMXEXT optimizations
  3. ;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
  4. ;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
  5. ;*
  6. ;* This file is part of FFmpeg.
  7. ;*
  8. ;* FFmpeg is free software; you can redistribute it and/or
  9. ;* modify it under the terms of the GNU Lesser General Public
  10. ;* License as published by the Free Software Foundation; either
  11. ;* version 2.1 of the License, or (at your option) any later version.
  12. ;*
  13. ;* FFmpeg is distributed in the hope that it will be useful,
  14. ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. ;* Lesser General Public License for more details.
  17. ;*
  18. ;* You should have received a copy of the GNU Lesser General Public
  19. ;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. ;******************************************************************************
  22. %include "x86inc.asm"
  23. %include "x86util.asm"
SECTION_RODATA

; Subpel MC filter coefficient tables, one row of taps per mx/my value.
; "hw" tables: taps stored as words, paired for pmaddwd (MMX/SSE2 horizontal).
; "hb" tables: taps stored as interleaved signed byte pairs for pmaddubsw (SSSE3).
; "v"  tables: one tap broadcast across 8 words for the pmullw vertical filters.
fourtap_filter_hw_m: times 4 dw  -6, 123
                     times 4 dw  12,  -1
                     times 4 dw  -9,  93
                     times 4 dw  50,  -6
                     times 4 dw  -6,  50
                     times 4 dw  93,  -9
                     times 4 dw  -1,  12
                     times 4 dw 123,  -6

sixtap_filter_hw_m:  times 4 dw   2, -11
                     times 4 dw 108,  36
                     times 4 dw  -8,   1
                     times 4 dw   3, -16
                     times 4 dw  77,  77
                     times 4 dw -16,   3
                     times 4 dw   1,  -8
                     times 4 dw  36, 108
                     times 4 dw -11,   2

fourtap_filter_hb_m: times 8 db  -6, 123
                     times 8 db  12,  -1
                     times 8 db  -9,  93
                     times 8 db  50,  -6
                     times 8 db  -6,  50
                     times 8 db  93,  -9
                     times 8 db  -1,  12
                     times 8 db 123,  -6

sixtap_filter_hb_m:  times 8 db   2,   1
                     times 8 db -11, 108
                     times 8 db  36,  -8
                     times 8 db   3,   3
                     times 8 db -16,  77
                     times 8 db  77, -16
                     times 8 db   1,   2
                     times 8 db  -8,  36
                     times 8 db 108, -11

fourtap_filter_v_m:  times 8 dw  -6
                     times 8 dw 123
                     times 8 dw  12
                     times 8 dw  -1
                     times 8 dw  -9
                     times 8 dw  93
                     times 8 dw  50
                     times 8 dw  -6
                     times 8 dw  -6
                     times 8 dw  50
                     times 8 dw  93
                     times 8 dw  -9
                     times 8 dw  -1
                     times 8 dw  12
                     times 8 dw 123
                     times 8 dw  -6

sixtap_filter_v_m:   times 8 dw   2
                     times 8 dw -11
                     times 8 dw 108
                     times 8 dw  36
                     times 8 dw  -8
                     times 8 dw   1
                     times 8 dw   3
                     times 8 dw -16
                     times 8 dw  77
                     times 8 dw  77
                     times 8 dw -16
                     times 8 dw   3
                     times 8 dw   1
                     times 8 dw  -8
                     times 8 dw  36
                     times 8 dw 108
                     times 8 dw -11
                     times 8 dw   2

; bilinear weights: vw rows hold word weight n (n=1..7), vb rows hold
; (8-n, n) byte pairs for pmaddubsw
bilinear_filter_vw_m: times 8 dw 1
                      times 8 dw 2
                      times 8 dw 3
                      times 8 dw 4
                      times 8 dw 5
                      times 8 dw 6
                      times 8 dw 7

bilinear_filter_vb_m: times 8 db 7, 1
                      times 8 db 6, 2
                      times 8 db 5, 3
                      times 8 db 4, 4
                      times 8 db 3, 5
                      times 8 db 2, 6
                      times 8 db 1, 7

; With PIC, the tables above are addressed through r11 (each function loads
; the table base with lea first); otherwise they are referenced directly.
%ifdef PIC
%define fourtap_filter_hw  r11
%define sixtap_filter_hw   r11
%define fourtap_filter_hb  r11
%define sixtap_filter_hb   r11
%define fourtap_filter_v   r11
%define sixtap_filter_v    r11
%define bilinear_filter_vw r11
%define bilinear_filter_vb r11
%else
%define fourtap_filter_hw  fourtap_filter_hw_m
%define sixtap_filter_hw   sixtap_filter_hw_m
%define fourtap_filter_hb  fourtap_filter_hb_m
%define sixtap_filter_hb   sixtap_filter_hb_m
%define fourtap_filter_v   fourtap_filter_v_m
%define sixtap_filter_v    sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
%endif

; pshufb control masks for the SSSE3 horizontal filters
filter_h2_shuf:  db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10

filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11

; IDCT multiplier constants (used via pmulhw in VP8_MULTIPLY_SUMSUB)
pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734

; shared constants provided elsewhere in the project
cextern pb_1
cextern pw_3
cextern pb_3
cextern pw_4
cextern pb_4
cextern pw_64
cextern pb_80
cextern pb_F8
cextern pb_FE
SECTION .text

;-----------------------------------------------------------------------------
; subpel MC functions:
;
; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
;                                              uint8_t *src, int srcstride,
;                                              int height,   int mx, int my);
;-----------------------------------------------------------------------------

; Instantiates the SSSE3 h6/h4/v4/v6 subpel filters for one block width.
; %1 = block width (4 or 8)
; %2 = xmm register count passed to cglobal for the h6/v4/v6 functions
; %3 = xmm register count passed to cglobal for the h4 function
%macro FILTER_SSSE3 3
cglobal put_vp8_epel%1_h6_ssse3, 6, 6, %2
    lea      r5d, [r5*3]                     ; mx*3 -> row index into 6-tap table
    mova      m3, [filter_h6_shuf2]
    mova      m4, [filter_h6_shuf3]
%ifdef PIC
    lea      r11, [sixtap_filter_hb_m]
%endif
    mova      m5, [sixtap_filter_hb+r5*8-48] ; set up 6tap filter in bytes
    mova      m6, [sixtap_filter_hb+r5*8-32]
    mova      m7, [sixtap_filter_hb+r5*8-16]

.nextrow
    movu      m0, [r2-2]
    mova      m1, m0
    mova      m2, m0
%ifidn %1, 4
; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
; shuffle with a memory operand
    punpcklbw m0, [r2+3]
%else
    pshufb    m0, [filter_h6_shuf1]
%endif
    pshufb    m1, m3
    pshufb    m2, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    pmaddubsw m2, m7
    paddsw    m0, m1
    paddsw    m0, m2
    paddsw    m0, [pw_64]                    ; rounding bias before >>7
    psraw     m0, 7
    packuswb  m0, m0
    movh    [r0], m0                         ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4                             ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_h4_ssse3, 6, 6, %3
    shl      r5d, 4                          ; mx*16 -> row index into 4-tap table
    mova      m2, [pw_64]
    mova      m3, [filter_h2_shuf]
    mova      m4, [filter_h4_shuf]
%ifdef PIC
    lea      r11, [fourtap_filter_hb_m]
%endif
    mova      m5, [fourtap_filter_hb+r5-16]  ; set up 4tap filter in bytes
    mova      m6, [fourtap_filter_hb+r5]

.nextrow
    movu      m0, [r2-1]
    mova      m1, m0
    pshufb    m0, m3
    pshufb    m1, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    paddsw    m0, m2
    paddsw    m0, m1
    psraw     m0, 7
    packuswb  m0, m0
    movh    [r0], m0                         ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4                             ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v4_ssse3, 7, 7, %2
    shl      r6d, 4                          ; my*16 -> row index into 4-tap table
%ifdef PIC
    lea      r11, [fourtap_filter_hb_m]
%endif
    mova      m5, [fourtap_filter_hb+r6-16]
    mova      m6, [fourtap_filter_hb+r6]
    mova      m7, [pw_64]

    ; read 3 lines
    sub       r2, r3
    movh      m0, [r2]
    movh      m1, [r2+  r3]
    movh      m2, [r2+2*r3]
    add       r2, r3

.nextrow
    movh      m3, [r2+2*r3]                  ; read new row
    mova      m4, m0
    mova      m0, m1                         ; slide the 3-row window down
    punpcklbw m4, m1
    mova      m1, m2
    punpcklbw m2, m3
    pmaddubsw m4, m5
    pmaddubsw m2, m6
    paddsw    m4, m2
    mova      m2, m3
    paddsw    m4, m7
    psraw     m4, 7
    packuswb  m4, m4
    movh    [r0], m4

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4                             ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v6_ssse3, 7, 7, %2
    lea      r6d, [r6*3]                     ; my*3 -> row index into 6-tap table
%ifdef PIC
    lea      r11, [sixtap_filter_hb_m]
%endif
    lea       r6, [sixtap_filter_hb+r6*8]    ; keep filter base pointer in r6

    ; read 5 lines
    sub       r2, r3
    sub       r2, r3
    movh      m0, [r2]
    movh      m1, [r2+r3]
    movh      m2, [r2+r3*2]
    lea       r2, [r2+r3*2]
    add       r2, r3
    movh      m3, [r2]
    movh      m4, [r2+r3]

.nextrow
    movh      m5, [r2+2*r3]                  ; read new row
    mova      m6, m0
    punpcklbw m6, m5
    mova      m0, m1
    punpcklbw m1, m2
    mova      m7, m3
    punpcklbw m7, m4
    pmaddubsw m6, [r6-48]
    pmaddubsw m1, [r6-32]
    pmaddubsw m7, [r6-16]
    paddsw    m6, m1
    paddsw    m6, m7
    mova      m1, m2                         ; slide the 5-row window down
    paddsw    m6, [pw_64]
    mova      m2, m3
    psraw     m6, 7
    mova      m3, m4
    packuswb  m6, m6
    mova      m4, m5
    movh    [r0], m6

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4                             ; next row
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_SSSE3 4, 0, 0
INIT_XMM
FILTER_SSSE3 8, 8, 7
; 4x4 block, H-only 4-tap filter
; Pixels are processed two at a time with pmaddwd on word pairs; the two
; dword partial sums are merged back into 4 word results with packssdw.
cglobal put_vp8_epel4_h4_mmxext, 6, 6
    shl       r5d, 4
%ifdef PIC
    lea       r11, [fourtap_filter_hw_m]
%endif
    movq      mm4, [fourtap_filter_hw+r5-16] ; set up 4tap filter in words
    movq      mm5, [fourtap_filter_hw+r5]
    movq      mm7, [pw_64]
    pxor      mm6, mm6                       ; zero register for unpacking

.nextrow
    movq      mm1, [r2-1]                    ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq      mm2, mm1                       ; byte ABCD..
    punpcklbw mm1, mm6                       ; byte->word ABCD
    pshufw    mm0, mm2, 9                    ; byte CDEF..
    punpcklbw mm0, mm6                       ; byte->word CDEF
    pshufw    mm3, mm1, 0x94                 ; word ABBC
    pshufw    mm1, mm0, 0x94                 ; word CDDE
    pmaddwd   mm3, mm4                       ; multiply 2px with F0/F1
    movq      mm0, mm1                       ; backup for second set of pixels
    pmaddwd   mm1, mm5                       ; multiply 2px with F2/F3
    paddd     mm3, mm1                       ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    punpckhbw mm2, mm6                       ; byte->word EFGH
    pmaddwd   mm0, mm4                       ; multiply backed up 2px with F0/F1
    pshufw    mm1, mm2, 0x94                 ; word EFFG
    pmaddwd   mm1, mm5                       ; multiply 2px with F2/F3
    paddd     mm0, mm1                       ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw  mm3, mm0                       ; merge dword->word (4px)
    paddsw    mm3, mm7                       ; rounding
    psraw     mm3, 7
    packuswb  mm3, mm6                       ; clip and word->bytes
    movd     [r0], mm3                       ; store

    ; go to next line
    add        r0, r1
    add        r2, r3
    dec        r4                            ; next row
    jg .nextrow
    REP_RET
; 4x4 block, H-only 6-tap filter
cglobal put_vp8_epel4_h6_mmxext, 6, 6
    lea       r5d, [r5*3]
%ifdef PIC
    lea       r11, [sixtap_filter_hw_m]
%endif
    movq      mm4, [sixtap_filter_hw+r5*8-48] ; set up 6tap filter in words
    movq      mm5, [sixtap_filter_hw+r5*8-32]
    movq      mm6, [sixtap_filter_hw+r5*8-16]
    movq      mm7, [pw_64]
    pxor      mm3, mm3                       ; zero register for unpacking

.nextrow
    movq      mm1, [r2-2]                    ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq      mm2, mm1                       ; byte ABCD..
    punpcklbw mm1, mm3                       ; byte->word ABCD
    pshufw    mm0, mm2, 0x9                  ; byte CDEF..
    punpckhbw mm2, mm3                       ; byte->word EFGH
    punpcklbw mm0, mm3                       ; byte->word CDEF
    pshufw    mm1, mm1, 0x94                 ; word ABBC
    pshufw    mm2, mm2, 0x94                 ; word EFFG
    pmaddwd   mm1, mm4                       ; multiply 2px with F0/F1
    pshufw    mm3, mm0, 0x94                 ; word CDDE
    movq      mm0, mm3                       ; backup for second set of pixels
    pmaddwd   mm3, mm5                       ; multiply 2px with F2/F3
    paddd     mm1, mm3                       ; add to 1st 2px cache
    movq      mm3, mm2                       ; backup for second set of pixels
    pmaddwd   mm2, mm6                       ; multiply 2px with F4/F5
    paddd     mm1, mm2                       ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    movd      mm2, [r2+3]                    ; byte FGHI (prevent overreads)
    pmaddwd   mm0, mm4                       ; multiply 1st backed up 2px with F0/F1
    pmaddwd   mm3, mm5                       ; multiply 2nd backed up 2px with F2/F3
    paddd     mm0, mm3                       ; add to 2nd 2px cache
    pxor      mm3, mm3                       ; re-zero mm3 (was reused as scratch)
    punpcklbw mm2, mm3                       ; byte->word FGHI
    pshufw    mm2, mm2, 0xE9                 ; word GHHI
    pmaddwd   mm2, mm6                       ; multiply 2px with F4/F5
    paddd     mm0, mm2                       ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw  mm1, mm0                       ; merge dword->word (4px)
    paddsw    mm1, mm7                       ; rounding
    psraw     mm1, 7
    packuswb  mm1, mm3                       ; clip and word->bytes
    movd     [r0], mm1                       ; store

    ; go to next line
    add        r0, r1
    add        r2, r3
    dec        r4                            ; next row
    jg .nextrow
    REP_RET
; 8-pixel-wide block, H-only 4-tap filter (SSE2)
; Each 8-pixel row is split into two halves of 4; each half is filtered with
; pmaddwd on shifted word copies, then merged/rounded/clipped.
INIT_XMM
cglobal put_vp8_epel8_h4_sse2, 6, 6, 8
    shl      r5d, 4
%ifdef PIC
    lea      r11, [fourtap_filter_hw_m]
%endif
    mova      m5, [fourtap_filter_hw+r5-16] ; set up 4tap filter in words
    mova      m6, [fourtap_filter_hw+r5]
    pxor      m7, m7                        ; zero register for unpacking

.nextrow
    movh      m0, [r2-1]
    punpcklbw m0, m7        ; ABCDEFGH
    mova      m1, m0
    mova      m2, m0
    mova      m3, m0
    psrldq    m1, 2         ; BCDEFGH
    psrldq    m2, 4         ; CDEFGH
    psrldq    m3, 6         ; DEFGH
    punpcklwd m0, m1        ; ABBCCDDE
    punpcklwd m2, m3        ; CDDEEFFG
    pmaddwd   m0, m5
    pmaddwd   m2, m6
    paddd     m0, m2
    movh      m1, [r2+3]    ; second half of the row
    punpcklbw m1, m7        ; ABCDEFGH
    mova      m2, m1
    mova      m3, m1
    mova      m4, m1
    psrldq    m2, 2         ; BCDEFGH
    psrldq    m3, 4         ; CDEFGH
    psrldq    m4, 6         ; DEFGH
    punpcklwd m1, m2        ; ABBCCDDE
    punpcklwd m3, m4        ; CDDEEFFG
    pmaddwd   m1, m5
    pmaddwd   m3, m6
    paddd     m1, m3
    packssdw  m0, m1
    paddsw    m0, [pw_64]   ; rounding bias before >>7
    psraw     m0, 7
    packuswb  m0, m7
    movh    [r0], m0        ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET
; 8-pixel-wide block, H-only 6-tap filter (SSE2)
cglobal put_vp8_epel8_h6_sse2, 6, 6, 8
    lea      r5d, [r5*3]
%ifdef PIC
    lea      r11, [sixtap_filter_hw_m]
%endif
    lea       r5, [sixtap_filter_hw+r5*8]   ; keep filter base pointer in r5
    pxor      m7, m7                        ; zero register for unpacking

.nextrow
    movu      m0, [r2-2]
    mova      m6, m0                        ; backup of the raw row (2nd half)
    mova      m4, m0
    punpcklbw m0, m7        ; ABCDEFGH
    mova      m1, m0
    mova      m2, m0
    mova      m3, m0
    psrldq    m1, 2         ; BCDEFGH
    psrldq    m2, 4         ; CDEFGH
    psrldq    m3, 6         ; DEFGH
    psrldq    m4, 4
    punpcklbw m4, m7        ; EFGH
    mova      m5, m4
    psrldq    m5, 2         ; FGH
    punpcklwd m0, m1        ; ABBCCDDE
    punpcklwd m2, m3        ; CDDEEFFG
    punpcklwd m4, m5        ; EFFGGHHI
    pmaddwd   m0, [r5-48]
    pmaddwd   m2, [r5-32]
    pmaddwd   m4, [r5-16]
    paddd     m0, m2
    paddd     m0, m4
    psrldq    m6, 4         ; shift to the second half of the row
    mova      m4, m6
    punpcklbw m6, m7        ; ABCDEFGH
    mova      m1, m6
    mova      m2, m6
    mova      m3, m6
    psrldq    m1, 2         ; BCDEFGH
    psrldq    m2, 4         ; CDEFGH
    psrldq    m3, 6         ; DEFGH
    psrldq    m4, 4
    punpcklbw m4, m7        ; EFGH
    mova      m5, m4
    psrldq    m5, 2         ; FGH
    punpcklwd m6, m1        ; ABBCCDDE
    punpcklwd m2, m3        ; CDDEEFFG
    punpcklwd m4, m5        ; EFFGGHHI
    pmaddwd   m6, [r5-48]
    pmaddwd   m2, [r5-32]
    pmaddwd   m4, [r5-16]
    paddd     m6, m2
    paddd     m6, m4
    packssdw  m0, m6
    paddsw    m0, [pw_64]   ; rounding bias before >>7
    psraw     m0, 7
    packuswb  m0, m7
    movh    [r0], m0        ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET
; Instantiates the pmullw-based vertical 4-tap and 6-tap subpel filters.
; %1 = isa suffix (mmxext/sse2), %2 = block width, %3 = xmm register count
%macro FILTER_V 3
; 4x4 block, V-only 4-tap filter
cglobal put_vp8_epel%2_v4_%1, 7, 7, %3
    shl      r6d, 5                 ; my*32: filter rows are 16 bytes each
%ifdef PIC
    lea      r11, [fourtap_filter_v_m]
%endif
    lea       r6, [fourtap_filter_v+r6-32]
    mova      m6, [pw_64]
    pxor      m7, m7                ; zero register for unpacking
    mova      m5, [r6+48]           ; last tap, kept in a register

    ; read 3 lines
    sub       r2, r3
    movh      m0, [r2]
    movh      m1, [r2+  r3]
    movh      m2, [r2+2*r3]
    add       r2, r3
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7

.nextrow
    ; first calculate negative taps (to prevent losing positive overflows)
    movh      m4, [r2+2*r3]         ; read new row
    punpcklbw m4, m7
    mova      m3, m4
    pmullw    m0, [r6+0]
    pmullw    m4, m5
    paddsw    m4, m0

    ; then calculate positive taps
    mova      m0, m1                ; slide the 3-row window down
    pmullw    m1, [r6+16]
    paddsw    m4, m1
    mova      m1, m2
    pmullw    m2, [r6+32]
    paddsw    m4, m2
    mova      m2, m3

    ; round/clip/store
    paddsw    m4, m6
    psraw     m4, 7
    packuswb  m4, m7
    movh    [r0], m4

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4                    ; next row
    jg .nextrow
    REP_RET

; 4x4 block, V-only 6-tap filter
cglobal put_vp8_epel%2_v6_%1, 7, 7, %3
    shl      r6d, 4
    lea       r6, [r6*3]            ; my*48: six 16-byte tap rows per filter
%ifdef PIC
    lea      r11, [sixtap_filter_v_m]
%endif
    lea       r6, [sixtap_filter_v+r6-96]
    pxor      m7, m7                ; zero register for unpacking

    ; read 5 lines
    sub       r2, r3
    sub       r2, r3
    movh      m0, [r2]
    movh      m1, [r2+r3]
    movh      m2, [r2+r3*2]
    lea       r2, [r2+r3*2]
    add       r2, r3
    movh      m3, [r2]
    movh      m4, [r2+r3]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    punpcklbw m4, m7

.nextrow
    ; first calculate negative taps (to prevent losing positive overflows)
    mova      m5, m1
    pmullw    m5, [r6+16]
    mova      m6, m4
    pmullw    m6, [r6+64]
    paddsw    m6, m5

    ; then calculate positive taps
    movh      m5, [r2+2*r3]         ; read new row
    punpcklbw m5, m7
    pmullw    m0, [r6+0]
    paddsw    m6, m0
    mova      m0, m1                ; slide the 5-row window down
    mova      m1, m2
    pmullw    m2, [r6+32]
    paddsw    m6, m2
    mova      m2, m3
    pmullw    m3, [r6+48]
    paddsw    m6, m3
    mova      m3, m4
    mova      m4, m5
    pmullw    m5, [r6+80]
    paddsw    m6, m5

    ; round/clip/store
    paddsw    m6, [pw_64]
    psraw     m6, 7
    packuswb  m6, m7
    movh    [r0], m6

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4                    ; next row
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_V mmxext, 4, 0
INIT_XMM
FILTER_V sse2, 8, 8
; Bilinear MC using word multiplies: weights (8-frac) and frac are applied
; with pmullw; psraw 2 followed by pavgw against zero completes the >>3 with
; rounding. %1 = isa suffix, %2 = block width, %3 = xmm register count
%macro FILTER_BILINEAR 3
cglobal put_vp8_bilinear%2_v_%1, 7,7,%3
    mov      r5d, 8*16
    shl      r6d, 4                 ; my*16
    sub      r5d, r6d               ; (8-my)*16
%ifdef PIC
    lea      r11, [bilinear_filter_vw_m]
%endif
    pxor      m6, m6                ; zero register
    mova      m4, [bilinear_filter_vw+r5-16] ; weight 8-my
    mova      m5, [bilinear_filter_vw+r6-16] ; weight my

.nextrow
    ; two output rows per iteration, sharing the middle source row (m1)
    movh      m0, [r2+r3*0]
    movh      m1, [r2+r3*1]
    movh      m3, [r2+r3*2]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m3, m6
    mova      m2, m1
    pmullw    m0, m4
    pmullw    m1, m5
    pmullw    m2, m4
    pmullw    m3, m5
    paddsw    m0, m1
    paddsw    m2, m3
    psraw     m0, 2
    psraw     m2, 2
    pavgw     m0, m6                ; completes (x+4)>>3 with rounding
    pavgw     m2, m6
%ifidn %1, mmxext
    packuswb  m0, m0
    packuswb  m2, m2
    movh [r0+r1*0], m0
    movh [r0+r1*1], m2
%else
    packuswb  m0, m2
    movh   [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif

    lea       r0, [r0+r1*2]
    lea       r2, [r2+r3*2]
    sub       r4, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%2_h_%1, 7,7,%3
    mov      r6d, 8*16
    shl      r5d, 4                 ; mx*16
    sub      r6d, r5d               ; (8-mx)*16
%ifdef PIC
    lea      r11, [bilinear_filter_vw_m]
%endif
    pxor      m6, m6                ; zero register
    mova      m4, [bilinear_filter_vw+r6-16] ; weight 8-mx
    mova      m5, [bilinear_filter_vw+r5-16] ; weight mx

.nextrow
    ; two output rows per iteration; each row blends src[x] with src[x+1]
    movh      m0, [r2+r3*0+0]
    movh      m1, [r2+r3*0+1]
    movh      m2, [r2+r3*1+0]
    movh      m3, [r2+r3*1+1]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m2, m6
    punpcklbw m3, m6
    pmullw    m0, m4
    pmullw    m1, m5
    pmullw    m2, m4
    pmullw    m3, m5
    paddsw    m0, m1
    paddsw    m2, m3
    psraw     m0, 2
    psraw     m2, 2
    pavgw     m0, m6                ; completes (x+4)>>3 with rounding
    pavgw     m2, m6
%ifidn %1, mmxext
    packuswb  m0, m0
    packuswb  m2, m2
    movh [r0+r1*0], m0
    movh [r0+r1*1], m2
%else
    packuswb  m0, m2
    movh   [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif

    lea       r0, [r0+r1*2]
    lea       r2, [r2+r3*2]
    sub       r4, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_BILINEAR mmxext, 4, 0
INIT_XMM
FILTER_BILINEAR sse2, 8, 7
; SSSE3 bilinear MC: both weights are packed as (8-frac, frac) byte pairs,
; so one pmaddubsw per interleaved pixel pair replaces two pmullw+paddsw.
; %1 = block width
%macro FILTER_BILINEAR_SSSE3 1
cglobal put_vp8_bilinear%1_v_ssse3, 7,7
    shl      r6d, 4
%ifdef PIC
    lea      r11, [bilinear_filter_vb_m]
%endif
    pxor      m4, m4                ; zero register
    mova      m3, [bilinear_filter_vb+r6-16] ; (8-my, my) byte pairs

.nextrow
    ; two output rows per iteration, sharing the middle source row
    movh      m0, [r2+r3*0]
    movh      m1, [r2+r3*1]
    movh      m2, [r2+r3*2]
    punpcklbw m0, m1
    punpcklbw m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw     m0, 2
    psraw     m1, 2
    pavgw     m0, m4                ; completes (x+4)>>3 with rounding
    pavgw     m1, m4
%if mmsize==8
    packuswb  m0, m0
    packuswb  m1, m1
    movh [r0+r1*0], m0
    movh [r0+r1*1], m1
%else
    packuswb  m0, m1
    movh   [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif

    lea       r0, [r0+r1*2]
    lea       r2, [r2+r3*2]
    sub       r4, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%1_h_ssse3, 7,7
    shl      r5d, 4
%ifdef PIC
    lea      r11, [bilinear_filter_vb_m]
%endif
    pxor      m4, m4                ; zero register
    mova      m2, [filter_h2_shuf]  ; interleaves src[x] with src[x+1]
    mova      m3, [bilinear_filter_vb+r5-16] ; (8-mx, mx) byte pairs

.nextrow
    movu      m0, [r2+r3*0]
    movu      m1, [r2+r3*1]
    pshufb    m0, m2
    pshufb    m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw     m0, 2
    psraw     m1, 2
    pavgw     m0, m4                ; completes (x+4)>>3 with rounding
    pavgw     m1, m4
%if mmsize==8
    packuswb  m0, m0
    packuswb  m1, m1
    movh [r0+r1*0], m0
    movh [r0+r1*1], m1
%else
    packuswb  m0, m1
    movh   [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif

    lea       r0, [r0+r1*2]
    lea       r2, [r2+r3*2]
    sub       r4, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_BILINEAR_SSSE3 4
INIT_XMM
FILTER_BILINEAR_SSSE3 8
  779. cglobal put_vp8_pixels8_mmx, 5,5
  780. .nextrow:
  781. movq mm0, [r2+r3*0]
  782. movq mm1, [r2+r3*1]
  783. lea r2, [r2+r3*2]
  784. movq [r0+r1*0], mm0
  785. movq [r0+r1*1], mm1
  786. lea r0, [r0+r1*2]
  787. sub r4d, 2
  788. jg .nextrow
  789. REP_RET
  790. cglobal put_vp8_pixels16_mmx, 5,5
  791. .nextrow:
  792. movq mm0, [r2+r3*0+0]
  793. movq mm1, [r2+r3*0+8]
  794. movq mm2, [r2+r3*1+0]
  795. movq mm3, [r2+r3*1+8]
  796. lea r2, [r2+r3*2]
  797. movq [r0+r1*0+0], mm0
  798. movq [r0+r1*0+8], mm1
  799. movq [r0+r1*1+0], mm2
  800. movq [r0+r1*1+8], mm3
  801. lea r0, [r0+r1*2]
  802. sub r4d, 2
  803. jg .nextrow
  804. REP_RET
  805. cglobal put_vp8_pixels16_sse, 5,5,2
  806. .nextrow:
  807. movups xmm0, [r2+r3*0]
  808. movups xmm1, [r2+r3*1]
  809. lea r2, [r2+r3*2]
  810. movaps [r0+r1*0], xmm0
  811. movaps [r0+r1*1], xmm1
  812. lea r0, [r0+r1*2]
  813. sub r4d, 2
  814. jg .nextrow
  815. REP_RET
;-----------------------------------------------------------------------------
; IDCT functions:
;
; void vp8_idct_dc_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------

cglobal vp8_idct_dc_add_mmx, 3, 3
    ; load data
    movd       mm0, [r1]

    ; calculate DC = (block[0] + 4) >> 3; keep both +DC and -DC as
    ; unsigned-saturated bytes so the add below clips in both directions
    paddw      mm0, [pw_4]
    pxor       mm1, mm1
    psraw      mm0, 3
    psubw      mm1, mm0           ; mm1 = -DC
    packuswb   mm0, mm0
    packuswb   mm1, mm1
    punpcklbw  mm0, mm0           ; broadcast to all 4 bytes
    punpcklbw  mm1, mm1
    punpcklwd  mm0, mm0
    punpcklwd  mm1, mm1

    ; add DC to the 4x4 destination block
    lea         r1, [r0+r2*2]
    movd       mm2, [r0]
    movd       mm3, [r0+r2]
    movd       mm4, [r1]
    movd       mm5, [r1+r2]
    paddusb    mm2, mm0
    paddusb    mm3, mm0
    paddusb    mm4, mm0
    paddusb    mm5, mm0
    psubusb    mm2, mm1
    psubusb    mm3, mm1
    psubusb    mm4, mm1
    psubusb    mm5, mm1
    movd      [r0], mm2
    movd   [r0+r2], mm3
    movd      [r1], mm4
    movd   [r1+r2], mm5
    RET
cglobal vp8_idct_dc_add_sse4, 3, 3, 6
    ; load data
    movd      xmm0, [r1]
    lea         r1, [r0+r2*2]
    pxor      xmm1, xmm1
    movq      xmm2, [pw_4]

    ; calculate DC = (block[0] + 4) >> 3 and broadcast it to all 8 words
    paddw     xmm0, xmm2
    movd      xmm2, [r0]
    movd      xmm3, [r0+r2]
    movd      xmm4, [r1]
    movd      xmm5, [r1+r2]
    psraw     xmm0, 3
    pshuflw   xmm0, xmm0, 0
    punpcklqdq xmm0, xmm0

    ; add DC at word precision, then pack back with unsigned saturation
    punpckldq xmm2, xmm3
    punpckldq xmm4, xmm5
    punpcklbw xmm2, xmm1
    punpcklbw xmm4, xmm1
    paddw     xmm2, xmm0
    paddw     xmm4, xmm0
    packuswb  xmm2, xmm4
    movd      [r0], xmm2
    pextrd [r0+r2], xmm2, 1       ; pextrd requires SSE4.1
    pextrd    [r1], xmm2, 2
    pextrd [r1+r2], xmm2, 3
    RET
;-----------------------------------------------------------------------------
; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------

; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
; this macro assumes that m6/m7 have words for 20091/17734 loaded
; (the paddw doubling before the pmulhw with 17734 yields x*35468 >> 16;
; pmulhw with 20091 plus paddw of the input yields x*20091>>16 + x)
%macro VP8_MULTIPLY_SUMSUB 4
    mova      %3, %1
    mova      %4, %2
    pmulhw    %3, m6              ;20091(1)
    pmulhw    %4, m6              ;20091(2)
    paddw     %3, %1
    paddw     %4, %2
    paddw     %1, %1
    paddw     %2, %2
    pmulhw    %1, m7              ;35468(1)
    pmulhw    %2, m7              ;35468(2)
    psubw     %1, %4
    paddw     %2, %3
%endmacro
; calculate x0=%1+%3; x1=%1-%3
; x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
    SUMSUB_BA           m%3, m%1, m%5       ;t0, t1
    VP8_MULTIPLY_SUMSUB m%2, m%4, m%5,m%6   ;t2, t3
    SUMSUB_BA           m%4, m%3, m%5       ;tmp0, tmp3
    SUMSUB_BA           m%2, m%1, m%5       ;tmp1, tmp2
    SWAP %4, %1                             ; rotate outputs into 0,1,2,3 order
    SWAP %4, %3
%endmacro
INIT_MMX
cglobal vp8_idct_add_mmx, 3, 3
    ; load block data
    movq         m0, [r1]
    movq         m1, [r1+8]
    movq         m2, [r1+16]
    movq         m3, [r1+24]
    movq         m6, [pw_20091]
    movq         m7, [pw_17734]

    ; actual IDCT: two 1-D passes with a transpose in between
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4
    paddw        m0, [pw_4]      ; rounding for the >>3 applied in STORE_DIFFx2
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4

    ; store: add the residual to dst with clipping (m4 = zero register)
    pxor         m4, m4
    lea          r1, [r0+2*r2]
    STORE_DIFFx2 m0, m1, m6, m7, m4, 3, r0, r2
    STORE_DIFFx2 m2, m3, m6, m7, m4, 3, r1, r2
    RET
;-----------------------------------------------------------------------------
; void vp8_luma_dc_wht_mmxext(DCTELEM block[4][4][16], DCTELEM dc[16])
;-----------------------------------------------------------------------------

; Store word %1 of m0..m3 into the DC slot (coefficient 0) of four
; consecutive 16-coefficient blocks at r0. Clobbers r1d and r2d.
%macro SCATTER_WHT 1
    pextrw r1d, m0, %1
    pextrw r2d, m1, %1
    mov [r0+2*16*0], r1w
    mov [r0+2*16*1], r2w
    pextrw r1d, m2, %1
    pextrw r2d, m3, %1
    mov [r0+2*16*2], r1w
    mov [r0+2*16*3], r2w
%endmacro
; One 4-point butterfly pass over m%1..m%4, in place, using SUMSUB_BADC
; (from x86util); the trailing SWAP rotates the results back into order.
%macro HADAMARD4_1D 4
    SUMSUB_BADC m%2, m%1, m%4, m%3
    SUMSUB_BADC m%4, m%2, m%3, m%1
    SWAP %1, %4, %3
%endmacro
INIT_MMX
cglobal vp8_luma_dc_wht_mmxext, 2,3
    movq          m0, [r1]
    movq          m1, [r1+8]
    movq          m2, [r1+16]
    movq          m3, [r1+24]

    ; inverse WHT: two butterfly passes with a transpose in between
    HADAMARD4_1D  0, 1, 2, 3
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    paddw         m0, [pw_3]      ; rounding bias for the >>3 below
    HADAMARD4_1D  0, 1, 2, 3
    psraw         m0, 3
    psraw         m1, 3
    psraw         m2, 3
    psraw         m3, 3

    ; scatter the 16 resulting DC values to the 16 luma blocks
    SCATTER_WHT   0
    add           r0, 2*16*4
    SCATTER_WHT   1
    add           r0, 2*16*4
    SCATTER_WHT   2
    add           r0, 2*16*4
    SCATTER_WHT   3
    RET
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
;-----------------------------------------------------------------------------

; macro called with 7 mm register indexes as argument, and 4 regular registers
;
; first 4 mm registers will carry the transposed pixel data
; the other three are scratchspace (one would be sufficient, but this allows
; for more spreading/pipelining and thus faster execution on OOE CPUs)
;
; first two regular registers are buf+4*stride and buf+5*stride
; third is -stride, fourth is +stride
%macro READ_8x4_INTERLEAVED 11
    ; interleave 8 (A-H) rows of 4 pixels each
    movd      m%1, [%8+%10*4]   ; A0-3
    movd      m%5, [%9+%10*4]   ; B0-3
    movd      m%2, [%8+%10*2]   ; C0-3
    movd      m%6, [%8+%10]     ; D0-3
    movd      m%3, [%8]         ; E0-3
    movd      m%7, [%9]         ; F0-3
    movd      m%4, [%9+%11]     ; G0-3
    punpcklbw m%1, m%5          ; A/B interleaved
    movd      m%5, [%9+%11*2]   ; H0-3
    punpcklbw m%2, m%6          ; C/D interleaved
    punpcklbw m%3, m%7          ; E/F interleaved
    punpcklbw m%4, m%5          ; G/H interleaved
%endmacro
; macro called with 7 mm register indexes as argument, and 5 regular registers
; first 11 mean the same as READ_8x4_INTERLEAVED above
; fifth regular register is scratchspace to reach the bottom 8 rows, it
; will be set to second regular register + 8*stride at the end
%macro READ_16x4_INTERLEAVED 12
    ; transpose 16 (A-P) rows of 4 pixels each
    lea       %12, [r0+8*r2]

    ; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
    movd      m%1, [%8+%10*4]   ; A0-3
    movd      m%3, [%12+%10*4]  ; I0-3
    movd      m%2, [%8+%10*2]   ; C0-3
    movd      m%4, [%12+%10*2]  ; K0-3
    movd      m%6, [%8+%10]     ; D0-3
    movd      m%5, [%12+%10]    ; L0-3
    movd      m%7, [%12]        ; M0-3
    add       %12, %11
    punpcklbw m%1, m%3          ; A/I
    movd      m%3, [%8]         ; E0-3
    punpcklbw m%2, m%4          ; C/K
    punpcklbw m%6, m%5          ; D/L
    punpcklbw m%3, m%7          ; E/M
    punpcklbw m%2, m%6          ; C/D/K/L interleaved

    ; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
    movd      m%5, [%9+%10*4]   ; B0-3
    movd      m%4, [%12+%10*4]  ; J0-3
    movd      m%7, [%9]         ; F0-3
    movd      m%6, [%12]        ; N0-3
    punpcklbw m%5, m%4          ; B/J
    punpcklbw m%7, m%6          ; F/N
    punpcklbw m%1, m%5          ; A/B/I/J interleaved
    punpcklbw m%3, m%7          ; E/F/M/N interleaved
    movd      m%4, [%9+%11]     ; G0-3
    movd      m%6, [%12+%11]    ; O0-3
    movd      m%5, [%9+%11*2]   ; H0-3
    movd      m%7, [%12+%11*2]  ; P0-3
    punpcklbw m%4, m%6          ; G/O
    punpcklbw m%5, m%7          ; H/P
    punpcklbw m%4, m%5          ; G/H/O/P interleaved
%endmacro
; write 4 mm registers of 2 dwords each
; first four arguments are mm register indexes containing source data
; last four are registers containing buf+4*stride, buf+5*stride,
; -stride and +stride
%macro WRITE_4x2D 8
    ; write out (2 dwords per register)
    movd [%5+%7*4], m%1
    movd [%5+%7*2], m%2
    movd      [%5], m%3
    movd   [%6+%8], m%4
    punpckhdq  m%1, m%1         ; move high dword down for the second store
    punpckhdq  m%2, m%2
    punpckhdq  m%3, m%3
    punpckhdq  m%4, m%4
    movd [%6+%7*4], m%1
    movd   [%5+%7], m%2
    movd      [%6], m%3
    movd [%6+%8*2], m%4
%endmacro
; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_4x2D, but with an extra register, so that the 5 regular
; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
; we add 1*stride to the third regular register in the process
%macro WRITE_4x4D 9
    ; write out (4 dwords per register), start with dwords zero
    movd [%5+%8*4], m%1
    movd      [%5], m%2
    movd [%5+%9*4], m%3
    movd [%5+%9*8], m%4

    ; store dwords 1
    psrldq     m%1, 4
    psrldq     m%2, 4
    psrldq     m%3, 4
    psrldq     m%4, 4
    movd [%6+%8*4], m%1
    movd      [%6], m%2
    movd [%6+%9*4], m%3
    movd [%6+%9*8], m%4

    ; write dwords 2
    psrldq     m%1, 4
    psrldq     m%2, 4
    psrldq     m%3, 4
    psrldq     m%4, 4
    movd [%5+%8*2], m%1
    movd   [%6+%9], m%2
    movd [%7+%8*2], m%3
    movd [%7+%9*2], m%4
    add         %7, %9          ; advance third pointer by one stride

    ; store dwords 3
    psrldq     m%1, 4
    psrldq     m%2, 4
    psrldq     m%3, 4
    psrldq     m%4, 4
    movd   [%5+%8], m%1
    movd [%6+%9*2], m%2
    movd [%7+%8*2], m%3
    movd [%7+%9*2], m%4
%endmacro
  1097. %macro SPLATB_REG 3
  1098. movd %1, %2
  1099. punpcklbw %1, %1
  1100. %if mmsize == 16 ; sse2
  1101. punpcklwd %1, %1
  1102. pshufd %1, %1, 0x0
  1103. %elifidn %3, mmx
  1104. punpcklwd %1, %1
  1105. punpckldq %1, %1
  1106. %else ; mmxext
  1107. pshufw %1, %1, 0x0
  1108. %endif
  1109. %endmacro
  1110. %macro SIMPLE_LOOPFILTER 3
  1111. cglobal vp8_%2_loop_filter_simple_%1, 3, %3
  1112. %ifidn %2, h
  1113. mov r5, rsp ; backup stack pointer
  1114. and rsp, ~(mmsize-1) ; align stack
  1115. %endif
  1116. %if mmsize == 8 ; mmx/mmxext
  1117. mov r3, 2
  1118. %endif
  1119. SPLATB_REG m7, r2, %1 ; splat "flim" into register
  1120. ; set up indexes to address 4 rows
  1121. mov r2, r1
  1122. neg r1
  1123. %ifidn %2, h
  1124. lea r0, [r0+4*r2-2]
  1125. sub rsp, mmsize*2 ; (aligned) storage space for saving p1/q1
  1126. %endif
  1127. %if mmsize == 8 ; mmx / mmxext
  1128. .next8px
  1129. %endif
  1130. %ifidn %2, v
  1131. ; read 4 half/full rows of pixels
  1132. mova m0, [r0+r1*2] ; p1
  1133. mova m1, [r0+r1] ; p0
  1134. mova m2, [r0] ; q0
  1135. mova m3, [r0+r2] ; q1
  1136. %else ; h
  1137. lea r4, [r0+r2]
  1138. %if mmsize == 8 ; mmx/mmxext
  1139. READ_8x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2
  1140. %else ; sse2
  1141. READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2, r3
  1142. %endif
  1143. TRANSPOSE4x4W 0, 1, 2, 3, 4
  1144. mova [rsp], m0 ; store p1
  1145. mova [rsp+mmsize], m3 ; store q1
  1146. %endif
  1147. ; simple_limit
  1148. mova m5, m2 ; m5=backup of q0
  1149. mova m6, m1 ; m6=backup of p0
  1150. psubusb m1, m2 ; p0-q0
  1151. psubusb m2, m6 ; q0-p0
  1152. por m1, m2 ; FFABS(p0-q0)
  1153. paddusb m1, m1 ; m1=FFABS(p0-q0)*2
  1154. mova m4, m3
  1155. mova m2, m0
  1156. psubusb m3, m0 ; q1-p1
  1157. psubusb m0, m4 ; p1-q1
  1158. por m3, m0 ; FFABS(p1-q1)
  1159. mova m0, [pb_80]
  1160. pxor m2, m0
  1161. pxor m4, m0
  1162. psubsb m2, m4 ; m2=p1-q1 (signed) backup for below
  1163. pand m3, [pb_FE]
  1164. psrlq m3, 1 ; m3=FFABS(p1-q1)/2, this can be used signed
  1165. paddusb m3, m1
  1166. psubusb m3, m7
  1167. pxor m1, m1
  1168. pcmpeqb m3, m1 ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)
  1169. ; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
  1170. mova m4, m5
  1171. pxor m5, m0
  1172. pxor m0, m6
  1173. psubsb m5, m0 ; q0-p0 (signed)
  1174. paddsb m2, m5
  1175. paddsb m2, m5
  1176. paddsb m2, m5 ; a=(p1-q1) + 3*(q0-p0)
  1177. pand m2, m3 ; apply filter mask (m3)
  1178. mova m3, [pb_F8]
  1179. mova m1, m2
  1180. paddsb m2, [pb_4] ; f1<<3=a+4
  1181. paddsb m1, [pb_3] ; f2<<3=a+3
  1182. pand m2, m3
  1183. pand m1, m3 ; cache f2<<3
  1184. pxor m0, m0
  1185. pxor m3, m3
  1186. pcmpgtb m0, m2 ; which values are <0?
  1187. psubb m3, m2 ; -f1<<3
  1188. psrlq m2, 3 ; +f1
  1189. psrlq m3, 3 ; -f1
  1190. pand m3, m0
  1191. pandn m0, m2
  1192. psubusb m4, m0
  1193. paddusb m4, m3 ; q0-f1
  1194. pxor m0, m0
  1195. pxor m3, m3
  1196. pcmpgtb m0, m1 ; which values are <0?
  1197. psubb m3, m1 ; -f2<<3
  1198. psrlq m1, 3 ; +f2
  1199. psrlq m3, 3 ; -f2
  1200. pand m3, m0
  1201. pandn m0, m1
  1202. paddusb m6, m0
  1203. psubusb m6, m3 ; p0+f2
  1204. ; store
  1205. %ifidn %2, v
  1206. mova [r0], m4
  1207. mova [r0+r1], m6
  1208. %else ; h
  1209. mova m0, [rsp] ; p1
  1210. SWAP 2, 4 ; p0
  1211. SWAP 1, 6 ; q0
  1212. mova m3, [rsp+mmsize] ; q1
  1213. TRANSPOSE4x4B 0, 1, 2, 3, 4
  1214. %if mmsize == 16 ; sse2
  1215. add r3, r1 ; change from r4*8*stride to r0+8*stride
  1216. WRITE_4x4D 0, 1, 2, 3, r0, r4, r3, r1, r2
  1217. %else ; mmx/mmxext
  1218. WRITE_4x2D 0, 1, 2, 3, r0, r4, r1, r2
  1219. %endif
  1220. %endif
  1221. %if mmsize == 8 ; mmx/mmxext
  1222. ; next 8 pixels
  1223. %ifidn %2, v
  1224. add r0, 8 ; advance 8 cols = pixels
  1225. %else ; h
  1226. lea r0, [r0+r2*8] ; advance 8 rows = lines
  1227. %endif
  1228. dec r3
  1229. jg .next8px
  1230. %ifidn %2, v
  1231. REP_RET
  1232. %else ; h
  1233. mov rsp, r5 ; restore stack pointer
  1234. RET
  1235. %endif
  1236. %else ; sse2
  1237. %ifidn %2, h
  1238. mov rsp, r5 ; restore stack pointer
  1239. %endif
  1240. RET
  1241. %endif
  1242. %endmacro
  1243. INIT_MMX
  1244. SIMPLE_LOOPFILTER mmx, v, 4
  1245. SIMPLE_LOOPFILTER mmx, h, 6
  1246. SIMPLE_LOOPFILTER mmxext, v, 4
  1247. SIMPLE_LOOPFILTER mmxext, h, 6
  1248. INIT_XMM
  1249. SIMPLE_LOOPFILTER sse2, v, 3
  1250. SIMPLE_LOOPFILTER sse2, h, 6
  1251. ;-----------------------------------------------------------------------------
  1252. ; void vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, int stride,
  1253. ; int flimE, int flimI, int hev_thr);
  1254. ;-----------------------------------------------------------------------------
  1255. %macro INNER_LOOPFILTER 4
  1256. cglobal vp8_%2_loop_filter16y_inner_%1, 5, %3, %4
  1257. %define dst_reg r0
  1258. %define mstride_reg r1
  1259. %define E_reg r2
  1260. %define I_reg r3
  1261. %define hev_thr_reg r4
  1262. %ifdef m8 ; x86-64, sse2
  1263. %define dst8_reg r4
  1264. %elif mmsize == 16 ; x86-32, sse2
  1265. %define dst8_reg r5
  1266. %else ; x86-32, mmx/mmxext
  1267. %define cnt_reg r5
  1268. %endif
  1269. %define stride_reg E_reg
  1270. %define dst2_reg I_reg
  1271. %ifndef m8
  1272. %define stack_reg hev_thr_reg
  1273. %endif
  1274. %ifndef m8 ; mmx/mmxext or sse2 on x86-32
  1275. ; splat function arguments
  1276. SPLATB_REG m0, E_reg, %1 ; E
  1277. SPLATB_REG m1, I_reg, %1 ; I
  1278. SPLATB_REG m2, hev_thr_reg, %1 ; hev_thresh
  1279. ; align stack
  1280. mov stack_reg, rsp ; backup stack pointer
  1281. and rsp, ~(mmsize-1) ; align stack
  1282. %ifidn %2, v
  1283. sub rsp, mmsize * 4 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
  1284. ; [3]=hev() result
  1285. %else ; h
  1286. sub rsp, mmsize * 6 ; extra storage space for transposes
  1287. %endif
  1288. %define flim_E [rsp]
  1289. %define flim_I [rsp+mmsize]
  1290. %define hev_thr [rsp+mmsize*2]
  1291. %define mask_res [rsp+mmsize*3]
  1292. mova flim_E, m0
  1293. mova flim_I, m1
  1294. mova hev_thr, m2
  1295. %else ; sse2 on x86-64
  1296. %define flim_E m9
  1297. %define flim_I m10
  1298. %define hev_thr m11
  1299. %define mask_res m12
  1300. ; splat function arguments
  1301. SPLATB_REG flim_E, E_reg, %1 ; E
  1302. SPLATB_REG flim_I, I_reg, %1 ; I
  1303. SPLATB_REG hev_thr, hev_thr_reg, %1 ; hev_thresh
  1304. %endif
  1305. %if mmsize == 8 ; mmx/mmxext
  1306. mov cnt_reg, 2
  1307. %endif
  1308. mov stride_reg, mstride_reg
  1309. neg mstride_reg
  1310. %ifidn %2, h
  1311. lea dst_reg, [dst_reg + stride_reg*4-4]
  1312. %endif
  1313. %if mmsize == 8
  1314. .next8px
  1315. %endif
  1316. ; read
  1317. lea dst2_reg, [dst_reg + stride_reg]
  1318. %ifidn %2, v
  1319. mova m0, [dst_reg +mstride_reg*4] ; p3
  1320. mova m1, [dst2_reg+mstride_reg*4] ; p2
  1321. mova m2, [dst_reg +mstride_reg*2] ; p1
  1322. mova m5, [dst2_reg] ; q1
  1323. mova m6, [dst2_reg+ stride_reg] ; q2
  1324. mova m7, [dst2_reg+ stride_reg*2] ; q3
  1325. %elif mmsize == 8 ; mmx/mmxext (h)
  1326. ; read 8 rows of 8px each
  1327. movu m0, [dst_reg +mstride_reg*4]
  1328. movu m1, [dst2_reg+mstride_reg*4]
  1329. movu m2, [dst_reg +mstride_reg*2]
  1330. movu m3, [dst_reg +mstride_reg]
  1331. movu m4, [dst_reg]
  1332. movu m5, [dst2_reg]
  1333. movu m6, [dst2_reg+ stride_reg]
  1334. ; 8x8 transpose
  1335. TRANSPOSE4x4B 0, 1, 2, 3, 7
  1336. %ifdef m13
  1337. SWAP 1, 13
  1338. %else
  1339. mova [rsp+mmsize*4], m1
  1340. %endif
  1341. movu m7, [dst2_reg+ stride_reg*2]
  1342. TRANSPOSE4x4B 4, 5, 6, 7, 1
  1343. SBUTTERFLY dq, 0, 4, 1 ; p3/p2
  1344. SBUTTERFLY dq, 2, 6, 1 ; q0/q1
  1345. SBUTTERFLY dq, 3, 7, 1 ; q2/q3
  1346. %ifdef m13
  1347. SWAP 1, 13
  1348. SWAP 2, 13
  1349. %else
  1350. mova m1, [rsp+mmsize*4]
  1351. mova [rsp+mmsize*4], m2 ; store q0
  1352. %endif
  1353. SBUTTERFLY dq, 1, 5, 2 ; p1/p0
  1354. %ifdef m14
  1355. SWAP 5, 14
  1356. %else
  1357. mova [rsp+mmsize*5], m5 ; store p0
  1358. %endif
  1359. SWAP 1, 4
  1360. SWAP 2, 4
  1361. SWAP 6, 3
  1362. SWAP 5, 3
  1363. %else ; sse2 (h)
  1364. lea dst8_reg, [dst_reg + stride_reg*8]
  1365. ; read 16 rows of 8px each, interleave
  1366. movh m0, [dst_reg +mstride_reg*4]
  1367. movh m1, [dst8_reg+mstride_reg*4]
  1368. movh m2, [dst_reg +mstride_reg*2]
  1369. movh m5, [dst8_reg+mstride_reg*2]
  1370. movh m3, [dst_reg +mstride_reg]
  1371. movh m6, [dst8_reg+mstride_reg]
  1372. movh m4, [dst_reg]
  1373. movh m7, [dst8_reg]
  1374. punpcklbw m0, m1 ; A/I
  1375. punpcklbw m2, m5 ; C/K
  1376. punpcklbw m3, m6 ; D/L
  1377. punpcklbw m4, m7 ; E/M
  1378. add dst8_reg, stride_reg
  1379. movh m1, [dst2_reg+mstride_reg*4]
  1380. movh m6, [dst8_reg+mstride_reg*4]
  1381. movh m5, [dst2_reg]
  1382. movh m7, [dst8_reg]
  1383. punpcklbw m1, m6 ; B/J
  1384. punpcklbw m5, m7 ; F/N
  1385. movh m6, [dst2_reg+ stride_reg]
  1386. movh m7, [dst8_reg+ stride_reg]
  1387. punpcklbw m6, m7 ; G/O
  1388. ; 8x16 transpose
  1389. TRANSPOSE4x4B 0, 1, 2, 3, 7
  1390. %ifdef m13
  1391. SWAP 1, 13
  1392. %else
  1393. mova [rsp+mmsize*4], m1
  1394. %endif
  1395. movh m7, [dst2_reg+ stride_reg*2]
  1396. movh m1, [dst8_reg+ stride_reg*2]
  1397. punpcklbw m7, m1 ; H/P
  1398. TRANSPOSE4x4B 4, 5, 6, 7, 1
  1399. SBUTTERFLY dq, 0, 4, 1 ; p3/p2
  1400. SBUTTERFLY dq, 2, 6, 1 ; q0/q1
  1401. SBUTTERFLY dq, 3, 7, 1 ; q2/q3
  1402. %ifdef m13
  1403. SWAP 1, 13
  1404. SWAP 2, 13
  1405. %else
  1406. mova m1, [rsp+mmsize*4]
  1407. mova [rsp+mmsize*4], m2 ; store q0
  1408. %endif
  1409. SBUTTERFLY dq, 1, 5, 2 ; p1/p0
  1410. %ifdef m14
  1411. SWAP 5, 14
  1412. %else
  1413. mova [rsp+mmsize*5], m5 ; store p0
  1414. %endif
  1415. SWAP 1, 4
  1416. SWAP 2, 4
  1417. SWAP 6, 3
  1418. SWAP 5, 3
  1419. %endif
  1420. ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
  1421. mova m4, m1
  1422. SWAP 4, 1
  1423. psubusb m4, m0 ; p2-p3
  1424. psubusb m0, m1 ; p3-p2
  1425. por m0, m4 ; abs(p3-p2)
  1426. mova m4, m2
  1427. SWAP 4, 2
  1428. psubusb m4, m1 ; p1-p2
  1429. psubusb m1, m2 ; p2-p1
  1430. por m1, m4 ; abs(p2-p1)
  1431. mova m4, m6
  1432. SWAP 4, 6
  1433. psubusb m4, m7 ; q2-q3
  1434. psubusb m7, m6 ; q3-q2
  1435. por m7, m4 ; abs(q3-q2)
  1436. mova m4, m5
  1437. SWAP 4, 5
  1438. psubusb m4, m6 ; q1-q2
  1439. psubusb m6, m5 ; q2-q1
  1440. por m6, m4 ; abs(q2-q1)
  1441. %ifidn %1, mmx
  1442. %ifdef m10
  1443. SWAP 4, 10
  1444. %else
  1445. mova m4, [rsp+mmsize]
  1446. %endif
  1447. pxor m3, m3
  1448. psubusb m0, m4
  1449. psubusb m1, m4
  1450. psubusb m7, m4
  1451. psubusb m6, m4
  1452. pcmpeqb m0, m3 ; abs(p3-p2) <= I
  1453. pcmpeqb m1, m3 ; abs(p2-p1) <= I
  1454. pcmpeqb m7, m3 ; abs(q3-q2) <= I
  1455. pcmpeqb m6, m3 ; abs(q2-q1) <= I
  1456. pand m0, m1
  1457. pand m7, m6
  1458. pand m0, m7
  1459. %else ; mmxext/sse2
  1460. pmaxub m0, m1
  1461. pmaxub m6, m7
  1462. pmaxub m0, m6
  1463. %endif
  1464. ; normal_limit and high_edge_variance for p1-p0, q1-q0
  1465. SWAP 7, 3 ; now m7 is zero
  1466. %ifidn %2, v
  1467. mova m3, [dst_reg +mstride_reg] ; p0
  1468. %elifdef m14
  1469. SWAP 3, 14
  1470. %else
  1471. mova m3, [rsp+mmsize*5]
  1472. %endif
  1473. mova m1, m2
  1474. SWAP 1, 2
  1475. mova m6, m3
  1476. SWAP 3, 6
  1477. psubusb m1, m3 ; p1-p0
  1478. psubusb m6, m2 ; p0-p1
  1479. por m1, m6 ; abs(p1-p0)
  1480. %ifidn %1, mmx
  1481. mova m6, m1
  1482. psubusb m1, m4
  1483. psubusb m6, hev_thr
  1484. pcmpeqb m1, m7 ; abs(p1-p0) <= I
  1485. pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
  1486. pand m0, m1
  1487. %ifdef m12
  1488. SWAP 6, 12
  1489. %else
  1490. mova [rsp+mmsize*3], m6
  1491. %endif
  1492. %else ; mmxext/sse2
  1493. pmaxub m0, m1 ; max_I
  1494. SWAP 1, 4 ; max_hev_thresh
  1495. %endif
  1496. SWAP 6, 4 ; now m6 is I
  1497. %ifidn %2, v
  1498. mova m4, [dst_reg] ; q0
  1499. %elifdef m13
  1500. SWAP 4, 13
  1501. %else
  1502. mova m4, [rsp+mmsize*4]
  1503. %endif
  1504. mova m1, m4
  1505. SWAP 1, 4
  1506. mova m7, m5
  1507. SWAP 7, 5
  1508. psubusb m1, m5 ; q0-q1
  1509. psubusb m7, m4 ; q1-q0
  1510. por m1, m7 ; abs(q1-q0)
  1511. %ifidn %1, mmx
  1512. mova m7, m1
  1513. psubusb m1, m6
  1514. psubusb m7, hev_thr
  1515. pxor m6, m6
  1516. pcmpeqb m1, m6 ; abs(q1-q0) <= I
  1517. pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
  1518. %ifdef m12
  1519. SWAP 6, 12
  1520. %else
  1521. mova m6, [rsp+mmsize*3]
  1522. %endif
  1523. pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
  1524. pand m6, m7
  1525. %else ; mmxext/sse2
  1526. pxor m7, m7
  1527. pmaxub m0, m1
  1528. pmaxub m6, m1
  1529. psubusb m0, flim_I
  1530. psubusb m6, hev_thr
  1531. pcmpeqb m0, m7 ; max(abs(..)) <= I
  1532. pcmpeqb m6, m7 ; !(max(abs..) > thresh)
  1533. %endif
  1534. %ifdef m12
  1535. SWAP 6, 12
  1536. %else
  1537. mova [rsp+mmsize*3], m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
  1538. %endif
  1539. ; simple_limit
  1540. mova m1, m3
  1541. SWAP 1, 3
  1542. mova m6, m4 ; keep copies of p0/q0 around for later use
  1543. SWAP 6, 4
  1544. psubusb m1, m4 ; p0-q0
  1545. psubusb m6, m3 ; q0-p0
  1546. por m1, m6 ; abs(q0-p0)
  1547. paddusb m1, m1 ; m1=2*abs(q0-p0)
  1548. mova m7, m2
  1549. SWAP 7, 2
  1550. mova m6, m5
  1551. SWAP 6, 5
  1552. psubusb m7, m5 ; p1-q1
  1553. psubusb m6, m2 ; q1-p1
  1554. por m7, m6 ; abs(q1-p1)
  1555. pxor m6, m6
  1556. pand m7, [pb_FE]
  1557. psrlq m7, 1 ; abs(q1-p1)/2
  1558. paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
  1559. psubusb m7, flim_E
  1560. pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
  1561. pand m0, m7 ; normal_limit result
  1562. ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
  1563. %ifdef m8 ; x86-64 && sse2
  1564. mova m8, [pb_80]
  1565. %define pb_80_var m8
  1566. %else ; x86-32 or mmx/mmxext
  1567. %define pb_80_var [pb_80]
  1568. %endif
  1569. mova m1, m4
  1570. mova m7, m3
  1571. pxor m1, pb_80_var
  1572. pxor m7, pb_80_var
  1573. psubsb m1, m7 ; (signed) q0-p0
  1574. mova m6, m2
  1575. mova m7, m5
  1576. pxor m6, pb_80_var
  1577. pxor m7, pb_80_var
  1578. psubsb m6, m7 ; (signed) p1-q1
  1579. mova m7, mask_res
  1580. pandn m7, m6
  1581. paddsb m7, m1
  1582. paddsb m7, m1
  1583. paddsb m7, m1 ; 3*(q0-p0)+is4tap?(p1-q1)
  1584. pand m7, m0
  1585. mova m1, [pb_F8]
  1586. mova m6, m7
  1587. paddsb m7, [pb_3]
  1588. paddsb m6, [pb_4]
  1589. pand m7, m1
  1590. pand m6, m1
  1591. pxor m1, m1
  1592. pxor m0, m0
  1593. pcmpgtb m1, m7
  1594. psubb m0, m7
  1595. psrlq m7, 3 ; +f2
  1596. psrlq m0, 3 ; -f2
  1597. pand m0, m1
  1598. pandn m1, m7
  1599. psubusb m3, m0
  1600. paddusb m3, m1 ; p0+f2
  1601. pxor m1, m1
  1602. pxor m0, m0
  1603. pcmpgtb m0, m6
  1604. psubb m1, m6
  1605. psrlq m6, 3 ; +f1
  1606. psrlq m1, 3 ; -f1
  1607. pand m1, m0
  1608. pandn m0, m6
  1609. psubusb m4, m0
  1610. paddusb m4, m1 ; q0-f1
  1611. %ifdef m12
  1612. SWAP 6, 12
  1613. %else
  1614. mova m6, [rsp+mmsize*3]
  1615. %endif
  1616. %ifidn %1, mmx
  1617. mova m7, [pb_1]
  1618. %else ; mmxext/sse2
  1619. pxor m7, m7
  1620. %endif
  1621. pand m0, m6
  1622. pand m1, m6
  1623. %ifidn %1, mmx
  1624. paddusb m0, m7
  1625. pand m1, [pb_FE]
  1626. pandn m7, m0
  1627. psrlq m1, 1
  1628. psrlq m7, 1
  1629. SWAP 0, 7
  1630. %else ; mmxext/sse2
  1631. psubusb m1, [pb_1]
  1632. pavgb m0, m7 ; a
  1633. pavgb m1, m7 ; -a
  1634. %endif
  1635. psubusb m5, m0
  1636. psubusb m2, m1
  1637. paddusb m5, m1 ; q1-a
  1638. paddusb m2, m0 ; p1+a
  1639. ; store
  1640. %ifidn %2, v
  1641. mova [dst_reg+mstride_reg*2], m2
  1642. mova [dst_reg+mstride_reg ], m3
  1643. mova [dst_reg], m4
  1644. mova [dst_reg+ stride_reg ], m5
  1645. %else ; h
  1646. add dst_reg, 2
  1647. add dst2_reg, 2
  1648. ; 4x8/16 transpose
  1649. TRANSPOSE4x4B 2, 3, 4, 5, 6
  1650. %if mmsize == 8 ; mmx/mmxext (h)
  1651. WRITE_4x2D 2, 3, 4, 5, dst_reg, dst2_reg, mstride_reg, stride_reg
  1652. %else ; sse2 (h)
  1653. lea dst8_reg, [dst8_reg+mstride_reg+2]
  1654. WRITE_4x4D 2, 3, 4, 5, dst_reg, dst2_reg, dst8_reg, mstride_reg, stride_reg
  1655. %endif
  1656. %endif
  1657. %if mmsize == 8
  1658. %ifidn %2, h
  1659. lea dst_reg, [dst_reg + stride_reg*8-2]
  1660. %else ; v
  1661. add dst_reg, 8
  1662. %endif
  1663. dec cnt_reg
  1664. jg .next8px
  1665. %endif
  1666. %ifndef m8 ; sse2 on x86-32 or mmx/mmxext
  1667. mov rsp, stack_reg ; restore stack pointer
  1668. %endif
  1669. RET
  1670. %endmacro
  1671. INIT_MMX
  1672. INNER_LOOPFILTER mmx, v, 6, 8
  1673. INNER_LOOPFILTER mmx, h, 6, 8
  1674. INNER_LOOPFILTER mmxext, v, 6, 8
  1675. INNER_LOOPFILTER mmxext, h, 6, 8
  1676. INIT_XMM
  1677. INNER_LOOPFILTER sse2, v, 5, 13
  1678. %ifdef m8
  1679. INNER_LOOPFILTER sse2, h, 5, 15
  1680. %else
  1681. INNER_LOOPFILTER sse2, h, 6, 15
  1682. %endif