;******************************************************************************
;* VP8 MMXEXT optimizations
;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
%include "x86util.asm"

SECTION_RODATA

fourtap_filter_hw_m: times 4 dw  -6, 123
                     times 4 dw  12,  -1
                     times 4 dw  -9,  93
                     times 4 dw  50,  -6
                     times 4 dw  -6,  50
                     times 4 dw  93,  -9
                     times 4 dw  -1,  12
                     times 4 dw 123,  -6

sixtap_filter_hw_m:  times 4 dw   2, -11
                     times 4 dw 108,  36
                     times 4 dw  -8,   1
                     times 4 dw   3, -16
                     times 4 dw  77,  77
                     times 4 dw -16,   3
                     times 4 dw   1,  -8
                     times 4 dw  36, 108
                     times 4 dw -11,   2

fourtap_filter_hb_m: times 8 db  -6, 123
                     times 8 db  12,  -1
                     times 8 db  -9,  93
                     times 8 db  50,  -6
                     times 8 db  -6,  50
                     times 8 db  93,  -9
                     times 8 db  -1,  12
                     times 8 db 123,  -6

sixtap_filter_hb_m:  times 8 db   2,   1
                     times 8 db -11, 108
                     times 8 db  36,  -8
                     times 8 db   3,   3
                     times 8 db -16,  77
                     times 8 db  77, -16
                     times 8 db   1,   2
                     times 8 db  -8,  36
                     times 8 db 108, -11

fourtap_filter_v_m:  times 8 dw  -6
                     times 8 dw 123
                     times 8 dw  12
                     times 8 dw  -1
                     times 8 dw  -9
                     times 8 dw  93
                     times 8 dw  50
                     times 8 dw  -6
                     times 8 dw  -6
                     times 8 dw  50
                     times 8 dw  93
                     times 8 dw  -9
                     times 8 dw  -1
                     times 8 dw  12
                     times 8 dw 123
                     times 8 dw  -6

sixtap_filter_v_m:   times 8 dw   2
                     times 8 dw -11
                     times 8 dw 108
                     times 8 dw  36
                     times 8 dw  -8
                     times 8 dw   1
                     times 8 dw   3
                     times 8 dw -16
                     times 8 dw  77
                     times 8 dw  77
                     times 8 dw -16
                     times 8 dw   3
                     times 8 dw   1
                     times 8 dw  -8
                     times 8 dw  36
                     times 8 dw 108
                     times 8 dw -11
                     times 8 dw   2

bilinear_filter_vw_m: times 8 dw 1
                      times 8 dw 2
                      times 8 dw 3
                      times 8 dw 4
                      times 8 dw 5
                      times 8 dw 6
                      times 8 dw 7

bilinear_filter_vb_m: times 8 db 7, 1
                      times 8 db 6, 2
                      times 8 db 5, 3
                      times 8 db 4, 4
                      times 8 db 3, 5
                      times 8 db 2, 6
                      times 8 db 1, 7

%ifdef PIC
%define fourtap_filter_hw  r11
%define sixtap_filter_hw   r11
%define fourtap_filter_hb  r11
%define sixtap_filter_hb   r11
%define fourtap_filter_v   r11
%define sixtap_filter_v    r11
%define bilinear_filter_vw r11
%define bilinear_filter_vb r11
%else
%define fourtap_filter_hw  fourtap_filter_hw_m
%define sixtap_filter_hw   sixtap_filter_hw_m
%define fourtap_filter_hb  fourtap_filter_hb_m
%define sixtap_filter_hb   sixtap_filter_hb_m
%define fourtap_filter_v   fourtap_filter_v_m
%define sixtap_filter_v    sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
%endif

filter_h2_shuf:  db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10

filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11

pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734
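
; editor's note: 20091 and 17734 (= 35468/2) are the fixed-point constants of
; the VP8 4x4 inverse transform; see VP8_MULTIPLY_SUMSUB further down for how
; they are used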
cextern pb_1
cextern pw_3
cextern pb_3
cextern pw_4
cextern pb_4
cextern pw_9
cextern pw_18
cextern pw_27
cextern pw_63
cextern pw_64
cextern pb_80
cextern pb_F8
cextern pb_FE

SECTION .text

;-----------------------------------------------------------------------------
; subpel MC functions:
;
; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
;                                              uint8_t *src, int srcstride,
;                                              int height, int mx, int my);
;-----------------------------------------------------------------------------
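
; FILTER_SSSE3: %1 = block width (4 or 8); %2 and %3 are the xmm register
; counts passed to cglobal for the h6/v4/v6 and h4 functions respectively
; (editor's note, inferred from the FILTER_SSSE3 4, 0, 0 and
; FILTER_SSSE3 8, 8, 7 invocations after the macro)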
%macro FILTER_SSSE3 3
cglobal put_vp8_epel%1_h6_ssse3, 6, 6, %2
    lea       r5d, [r5*3]
    mova      m3, [filter_h6_shuf2]
    mova      m4, [filter_h6_shuf3]
%ifdef PIC
    lea       r11, [sixtap_filter_hb_m]
%endif
    mova      m5, [sixtap_filter_hb+r5*8-48] ; set up 6tap filter in bytes
    mova      m6, [sixtap_filter_hb+r5*8-32]
    mova      m7, [sixtap_filter_hb+r5*8-16]

.nextrow
    movu      m0, [r2-2]
    mova      m1, m0
    mova      m2, m0
%ifidn %1, 4
; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
; shuffle with a memory operand
    punpcklbw m0, [r2+3]
%else
    pshufb    m0, [filter_h6_shuf1]
%endif
    pshufb    m1, m3
    pshufb    m2, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    pmaddubsw m2, m7
    paddsw    m0, m1
    paddsw    m0, m2
    paddsw    m0, [pw_64]
    psraw     m0, 7
    packuswb  m0, m0
    movh      [r0], m0      ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_h4_ssse3, 6, 6, %3
    shl       r5d, 4
    mova      m2, [pw_64]
    mova      m3, [filter_h2_shuf]
    mova      m4, [filter_h4_shuf]
%ifdef PIC
    lea       r11, [fourtap_filter_hb_m]
%endif
    mova      m5, [fourtap_filter_hb+r5-16] ; set up 4tap filter in bytes
    mova      m6, [fourtap_filter_hb+r5]

.nextrow
    movu      m0, [r2-1]
    mova      m1, m0
    pshufb    m0, m3
    pshufb    m1, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    paddsw    m0, m2
    paddsw    m0, m1
    psraw     m0, 7
    packuswb  m0, m0
    movh      [r0], m0      ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v4_ssse3, 7, 7, %2
    shl       r6d, 4
%ifdef PIC
    lea       r11, [fourtap_filter_hb_m]
%endif
    mova      m5, [fourtap_filter_hb+r6-16]
    mova      m6, [fourtap_filter_hb+r6]
    mova      m7, [pw_64]

    ; read 3 lines
    sub       r2, r3
    movh      m0, [r2]
    movh      m1, [r2+r3]
    movh      m2, [r2+2*r3]
    add       r2, r3

.nextrow
    movh      m3, [r2+2*r3] ; read new row
    mova      m4, m0
    mova      m0, m1
    punpcklbw m4, m1
    mova      m1, m2
    punpcklbw m2, m3
    pmaddubsw m4, m5
    pmaddubsw m2, m6
    paddsw    m4, m2
    mova      m2, m3
    paddsw    m4, m7
    psraw     m4, 7
    packuswb  m4, m4
    movh      [r0], m4

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v6_ssse3, 7, 7, %2
    lea       r6d, [r6*3]
%ifdef PIC
    lea       r11, [sixtap_filter_hb_m]
%endif
    lea       r6, [sixtap_filter_hb+r6*8]

    ; read 5 lines
    sub       r2, r3
    sub       r2, r3
    movh      m0, [r2]
    movh      m1, [r2+r3]
    movh      m2, [r2+r3*2]
    lea       r2, [r2+r3*2]
    add       r2, r3
    movh      m3, [r2]
    movh      m4, [r2+r3]

.nextrow
    movh      m5, [r2+2*r3] ; read new row
    mova      m6, m0
    punpcklbw m6, m5
    mova      m0, m1
    punpcklbw m1, m2
    mova      m7, m3
    punpcklbw m7, m4
    pmaddubsw m6, [r6-48]
    pmaddubsw m1, [r6-32]
    pmaddubsw m7, [r6-16]
    paddsw    m6, m1
    paddsw    m6, m7
    mova      m1, m2
    paddsw    m6, [pw_64]
    mova      m2, m3
    psraw     m6, 7
    mova      m3, m4
    packuswb  m6, m6
    mova      m4, m5
    movh      [r0], m6

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET
%endmacro
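
; instantiate 4px-wide (MMX registers) and 8px-wide (XMM registers) versions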
INIT_MMX
FILTER_SSSE3 4, 0, 0
INIT_XMM
FILTER_SSSE3 8, 8, 7

; 4x4 block, H-only 4-tap filter
cglobal put_vp8_epel4_h4_mmxext, 6, 6
    shl       r5d, 4
%ifdef PIC
    lea       r11, [fourtap_filter_hw_m]
%endif
    movq      mm4, [fourtap_filter_hw+r5-16] ; set up 4tap filter in words
    movq      mm5, [fourtap_filter_hw+r5]
    movq      mm7, [pw_64]
    pxor      mm6, mm6

.nextrow
    movq      mm1, [r2-1]    ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq      mm2, mm1       ; byte ABCD..
    punpcklbw mm1, mm6       ; byte->word ABCD
    pshufw    mm0, mm2, 9    ; byte CDEF..
    punpcklbw mm0, mm6       ; byte->word CDEF
    pshufw    mm3, mm1, 0x94 ; word ABBC
    pshufw    mm1, mm0, 0x94 ; word CDDE
    pmaddwd   mm3, mm4       ; multiply 2px with F0/F1
    movq      mm0, mm1       ; backup for second set of pixels
    pmaddwd   mm1, mm5       ; multiply 2px with F2/F3
    paddd     mm3, mm1       ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    punpckhbw mm2, mm6       ; byte->word EFGH
    pmaddwd   mm0, mm4       ; multiply backed up 2px with F0/F1
    pshufw    mm1, mm2, 0x94 ; word EFFG
    pmaddwd   mm1, mm5       ; multiply 2px with F2/F3
    paddd     mm0, mm1       ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw  mm3, mm0       ; merge dword->word (4px)
    paddsw    mm3, mm7       ; rounding
    psraw     mm3, 7
    packuswb  mm3, mm6       ; clip and word->bytes
    movd      [r0], mm3      ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4             ; next row
    jg .nextrow
    REP_RET

; 4x4 block, H-only 6-tap filter
cglobal put_vp8_epel4_h6_mmxext, 6, 6
    lea       r5d, [r5*3]
%ifdef PIC
    lea       r11, [sixtap_filter_hw_m]
%endif
    movq      mm4, [sixtap_filter_hw+r5*8-48] ; set up 6tap filter in words
    movq      mm5, [sixtap_filter_hw+r5*8-32]
    movq      mm6, [sixtap_filter_hw+r5*8-16]
    movq      mm7, [pw_64]
    pxor      mm3, mm3

.nextrow
    movq      mm1, [r2-2]    ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq      mm2, mm1       ; byte ABCD..
    punpcklbw mm1, mm3       ; byte->word ABCD
    pshufw    mm0, mm2, 0x9  ; byte CDEF..
    punpckhbw mm2, mm3       ; byte->word EFGH
    punpcklbw mm0, mm3       ; byte->word CDEF
    pshufw    mm1, mm1, 0x94 ; word ABBC
    pshufw    mm2, mm2, 0x94 ; word EFFG
    pmaddwd   mm1, mm4       ; multiply 2px with F0/F1
    pshufw    mm3, mm0, 0x94 ; word CDDE
    movq      mm0, mm3       ; backup for second set of pixels
    pmaddwd   mm3, mm5       ; multiply 2px with F2/F3
    paddd     mm1, mm3       ; add to 1st 2px cache
    movq      mm3, mm2       ; backup for second set of pixels
    pmaddwd   mm2, mm6       ; multiply 2px with F4/F5
    paddd     mm1, mm2       ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    movd      mm2, [r2+3]    ; byte FGHI (prevent overreads)
    pmaddwd   mm0, mm4       ; multiply 1st backed up 2px with F0/F1
    pmaddwd   mm3, mm5       ; multiply 2nd backed up 2px with F2/F3
    paddd     mm0, mm3       ; add to 2nd 2px cache
    pxor      mm3, mm3
    punpcklbw mm2, mm3       ; byte->word FGHI
    pshufw    mm2, mm2, 0xE9 ; word GHHI
    pmaddwd   mm2, mm6       ; multiply 2px with F4/F5
    paddd     mm0, mm2       ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw  mm1, mm0       ; merge dword->word (4px)
    paddsw    mm1, mm7       ; rounding
    psraw     mm1, 7
    packuswb  mm1, mm3       ; clip and word->bytes
    movd      [r0], mm1      ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4             ; next row
    jg .nextrow
    REP_RET

INIT_XMM
cglobal put_vp8_epel8_h4_sse2, 6, 6, 10
    shl       r5d, 5
%ifdef PIC
    lea       r11, [fourtap_filter_v_m]
%endif
    lea       r5, [fourtap_filter_v+r5-32]
    pxor      m7, m7
    mova      m4, [pw_64]
    mova      m5, [r5+ 0]
    mova      m6, [r5+16]
%ifdef m8
    mova      m8, [r5+32]
    mova      m9, [r5+48]
%endif

.nextrow
    movq      m0, [r2-1]
    movq      m1, [r2-0]
    movq      m2, [r2+1]
    movq      m3, [r2+2]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    pmullw    m0, m5
    pmullw    m1, m6
%ifdef m8
    pmullw    m2, m8
    pmullw    m3, m9
%else
    pmullw    m2, [r5+32]
    pmullw    m3, [r5+48]
%endif
    paddsw    m0, m1
    paddsw    m2, m3
    paddsw    m0, m2
    paddsw    m0, m4
    psraw     m0, 7
    packuswb  m0, m7
    movh      [r0], m0      ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel8_h6_sse2, 6, 6, 14
    lea       r5d, [r5*3]
    shl       r5d, 4
%ifdef PIC
    lea       r11, [sixtap_filter_v_m]
%endif
    lea       r5, [sixtap_filter_v+r5-96]
    pxor      m7, m7
    mova      m6, [pw_64]
%ifdef m8
    mova      m8, [r5+ 0]
    mova      m9, [r5+16]
    mova      m10, [r5+32]
    mova      m11, [r5+48]
    mova      m12, [r5+64]
    mova      m13, [r5+80]
%endif

.nextrow
    movq      m0, [r2-2]
    movq      m1, [r2-1]
    movq      m2, [r2-0]
    movq      m3, [r2+1]
    movq      m4, [r2+2]
    movq      m5, [r2+3]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    punpcklbw m4, m7
    punpcklbw m5, m7
%ifdef m8
    pmullw    m0, m8
    pmullw    m1, m9
    pmullw    m2, m10
    pmullw    m3, m11
    pmullw    m4, m12
    pmullw    m5, m13
%else
    pmullw    m0, [r5+ 0]
    pmullw    m1, [r5+16]
    pmullw    m2, [r5+32]
    pmullw    m3, [r5+48]
    pmullw    m4, [r5+64]
    pmullw    m5, [r5+80]
%endif
    paddsw    m1, m4
    paddsw    m0, m5
    paddsw    m1, m2
    paddsw    m0, m3
    paddsw    m0, m1
    paddsw    m0, m6
    psraw     m0, 7
    packuswb  m0, m7
    movh      [r0], m0      ; store

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET
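
; FILTER_V: %1 = instruction set suffix, %2 = block width (4 or 8),
; %3 = xmm register count for cglobal (editor's note, inferred from the
; invocations after the macro)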
%macro FILTER_V 3
; 4x4 block, V-only 4-tap filter
cglobal put_vp8_epel%2_v4_%1, 7, 7, %3
    shl       r6d, 5
%ifdef PIC
    lea       r11, [fourtap_filter_v_m]
%endif
    lea       r6, [fourtap_filter_v+r6-32]
    mova      m6, [pw_64]
    pxor      m7, m7
    mova      m5, [r6+48]

    ; read 3 lines
    sub       r2, r3
    movh      m0, [r2]
    movh      m1, [r2+r3]
    movh      m2, [r2+2*r3]
    add       r2, r3
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7

.nextrow
    ; first calculate negative taps (to prevent losing positive overflows)
    movh      m4, [r2+2*r3] ; read new row
    punpcklbw m4, m7
    mova      m3, m4
    pmullw    m0, [r6+0]
    pmullw    m4, m5
    paddsw    m4, m0

    ; then calculate positive taps
    mova      m0, m1
    pmullw    m1, [r6+16]
    paddsw    m4, m1
    mova      m1, m2
    pmullw    m2, [r6+32]
    paddsw    m4, m2
    mova      m2, m3

    ; round/clip/store
    paddsw    m4, m6
    psraw     m4, 7
    packuswb  m4, m7
    movh      [r0], m4

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET

; 4x4 block, V-only 6-tap filter
cglobal put_vp8_epel%2_v6_%1, 7, 7, %3
    shl       r6d, 4
    lea       r6, [r6*3]
%ifdef PIC
    lea       r11, [sixtap_filter_v_m]
%endif
    lea       r6, [sixtap_filter_v+r6-96]
    pxor      m7, m7

    ; read 5 lines
    sub       r2, r3
    sub       r2, r3
    movh      m0, [r2]
    movh      m1, [r2+r3]
    movh      m2, [r2+r3*2]
    lea       r2, [r2+r3*2]
    add       r2, r3
    movh      m3, [r2]
    movh      m4, [r2+r3]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    punpcklbw m4, m7

.nextrow
    ; first calculate negative taps (to prevent losing positive overflows)
    mova      m5, m1
    pmullw    m5, [r6+16]
    mova      m6, m4
    pmullw    m6, [r6+64]
    paddsw    m6, m5

    ; then calculate positive taps
    movh      m5, [r2+2*r3] ; read new row
    punpcklbw m5, m7
    pmullw    m0, [r6+0]
    paddsw    m6, m0
    mova      m0, m1
    mova      m1, m2
    pmullw    m2, [r6+32]
    paddsw    m6, m2
    mova      m2, m3
    pmullw    m3, [r6+48]
    paddsw    m6, m3
    mova      m3, m4
    mova      m4, m5
    pmullw    m5, [r6+80]
    paddsw    m6, m5

    ; round/clip/store
    paddsw    m6, [pw_64]
    psraw     m6, 7
    packuswb  m6, m7
    movh      [r0], m6

    ; go to next line
    add       r0, r1
    add       r2, r3
    dec       r4            ; next row
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_V mmxext, 4, 0
INIT_XMM
FILTER_V sse2, 8, 8
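
; FILTER_BILINEAR: %1 = instruction set suffix, %2 = block width,
; %3 = xmm register count for cglobal; the filter strength comes from the
; my (v) / mx (h) argument, presumably 1..7 in eighth-pel units, matching
; the 7-entry bilinear_filter tables above (editor's note)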
%macro FILTER_BILINEAR 3
cglobal put_vp8_bilinear%2_v_%1, 7, 7, %3
    mov       r5d, 8*16
    shl       r6d, 4
    sub       r5d, r6d
%ifdef PIC
    lea       r11, [bilinear_filter_vw_m]
%endif
    pxor      m6, m6
    mova      m4, [bilinear_filter_vw+r5-16]
    mova      m5, [bilinear_filter_vw+r6-16]

.nextrow
    movh      m0, [r2+r3*0]
    movh      m1, [r2+r3*1]
    movh      m3, [r2+r3*2]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m3, m6
    mova      m2, m1
    pmullw    m0, m4
    pmullw    m1, m5
    pmullw    m2, m4
    pmullw    m3, m5
    paddsw    m0, m1
    paddsw    m2, m3
    psraw     m0, 2
    psraw     m2, 2
    pavgw     m0, m6
    pavgw     m2, m6
%ifidn %1, mmxext
    packuswb  m0, m0
    packuswb  m2, m2
    movh      [r0+r1*0], m0
    movh      [r0+r1*1], m2
%else
    packuswb  m0, m2
    movh      [r0+r1*0], m0
    movhps    [r0+r1*1], m0
%endif

    lea       r0, [r0+r1*2]
    lea       r2, [r2+r3*2]
    sub       r4, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%2_h_%1, 7, 7, %3
    mov       r6d, 8*16
    shl       r5d, 4
    sub       r6d, r5d
%ifdef PIC
    lea       r11, [bilinear_filter_vw_m]
%endif
    pxor      m6, m6
    mova      m4, [bilinear_filter_vw+r6-16]
    mova      m5, [bilinear_filter_vw+r5-16]

.nextrow
    movh      m0, [r2+r3*0+0]
    movh      m1, [r2+r3*0+1]
    movh      m2, [r2+r3*1+0]
    movh      m3, [r2+r3*1+1]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m2, m6
    punpcklbw m3, m6
    pmullw    m0, m4
    pmullw    m1, m5
    pmullw    m2, m4
    pmullw    m3, m5
    paddsw    m0, m1
    paddsw    m2, m3
    psraw     m0, 2
    psraw     m2, 2
    pavgw     m0, m6
    pavgw     m2, m6
%ifidn %1, mmxext
    packuswb  m0, m0
    packuswb  m2, m2
    movh      [r0+r1*0], m0
    movh      [r0+r1*1], m2
%else
    packuswb  m0, m2
    movh      [r0+r1*0], m0
    movhps    [r0+r1*1], m0
%endif

    lea       r0, [r0+r1*2]
    lea       r2, [r2+r3*2]
    sub       r4, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_BILINEAR mmxext, 4, 0
INIT_XMM
FILTER_BILINEAR sse2, 8, 7

%macro FILTER_BILINEAR_SSSE3 1
cglobal put_vp8_bilinear%1_v_ssse3, 7, 7
    shl       r6d, 4
%ifdef PIC
    lea       r11, [bilinear_filter_vb_m]
%endif
    pxor      m4, m4
    mova      m3, [bilinear_filter_vb+r6-16]

.nextrow
    movh      m0, [r2+r3*0]
    movh      m1, [r2+r3*1]
    movh      m2, [r2+r3*2]
    punpcklbw m0, m1
    punpcklbw m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw     m0, 2
    psraw     m1, 2
    pavgw     m0, m4
    pavgw     m1, m4
%if mmsize == 8
    packuswb  m0, m0
    packuswb  m1, m1
    movh      [r0+r1*0], m0
    movh      [r0+r1*1], m1
%else
    packuswb  m0, m1
    movh      [r0+r1*0], m0
    movhps    [r0+r1*1], m0
%endif

    lea       r0, [r0+r1*2]
    lea       r2, [r2+r3*2]
    sub       r4, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%1_h_ssse3, 7, 7
    shl       r5d, 4
%ifdef PIC
    lea       r11, [bilinear_filter_vb_m]
%endif
    pxor      m4, m4
    mova      m2, [filter_h2_shuf]
    mova      m3, [bilinear_filter_vb+r5-16]

.nextrow
    movu      m0, [r2+r3*0]
    movu      m1, [r2+r3*1]
    pshufb    m0, m2
    pshufb    m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw     m0, 2
    psraw     m1, 2
    pavgw     m0, m4
    pavgw     m1, m4
%if mmsize == 8
    packuswb  m0, m0
    packuswb  m1, m1
    movh      [r0+r1*0], m0
    movh      [r0+r1*1], m1
%else
    packuswb  m0, m1
    movh      [r0+r1*0], m0
    movhps    [r0+r1*1], m0
%endif

    lea       r0, [r0+r1*2]
    lea       r2, [r2+r3*2]
    sub       r4, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_BILINEAR_SSSE3 4
INIT_XMM
FILTER_BILINEAR_SSSE3 8

cglobal put_vp8_pixels8_mmx, 5, 5
.nextrow:
    movq      mm0, [r2+r3*0]
    movq      mm1, [r2+r3*1]
    lea       r2, [r2+r3*2]
    movq      [r0+r1*0], mm0
    movq      [r0+r1*1], mm1
    lea       r0, [r0+r1*2]
    sub       r4d, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_pixels16_mmx, 5, 5
.nextrow:
    movq      mm0, [r2+r3*0+0]
    movq      mm1, [r2+r3*0+8]
    movq      mm2, [r2+r3*1+0]
    movq      mm3, [r2+r3*1+8]
    lea       r2, [r2+r3*2]
    movq      [r0+r1*0+0], mm0
    movq      [r0+r1*0+8], mm1
    movq      [r0+r1*1+0], mm2
    movq      [r0+r1*1+8], mm3
    lea       r0, [r0+r1*2]
    sub       r4d, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_pixels16_sse, 5, 5, 2
.nextrow:
    movups    xmm0, [r2+r3*0]
    movups    xmm1, [r2+r3*1]
    lea       r2, [r2+r3*2]
    movaps    [r0+r1*0], xmm0
    movaps    [r0+r1*1], xmm1
    lea       r0, [r0+r1*2]
    sub       r4d, 2
    jg .nextrow
    REP_RET

;-----------------------------------------------------------------------------
; void vp8_idct_dc_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------
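
; ADD_DC: %1 = splatted DC bytes to add, %2 = splatted negated DC bytes
; (the unsigned saturating add/subtract pair implements a signed DC add with
; clipping), %3 = byte offset within each row, %4 = load/store instruction to
; use (movh or mova) (editor's note)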
%macro ADD_DC 4
    %4        m2, [r0+%3]
    %4        m3, [r0+r2+%3]
    %4        m4, [r1+%3]
    %4        m5, [r1+r2+%3]
    paddusb   m2, %1
    paddusb   m3, %1
    paddusb   m4, %1
    paddusb   m5, %1
    psubusb   m2, %2
    psubusb   m3, %2
    psubusb   m4, %2
    psubusb   m5, %2
    %4        [r0+%3], m2
    %4        [r0+r2+%3], m3
    %4        [r1+%3], m4
    %4        [r1+r2+%3], m5
%endmacro

INIT_MMX
cglobal vp8_idct_dc_add_mmx, 3, 3
    ; load data
    movd      m0, [r1]

    ; calculate DC
    paddw     m0, [pw_4]
    pxor      m1, m1
    psraw     m0, 3
    movd      [r1], m1
    psubw     m1, m0
    packuswb  m0, m0
    packuswb  m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1
    punpcklwd m0, m0
    punpcklwd m1, m1

    ; add DC
    lea       r1, [r0+r2*2]
    ADD_DC    m0, m1, 0, movh
    RET

INIT_XMM
cglobal vp8_idct_dc_add_sse4, 3, 3, 6
    ; load data
    movd      m0, [r1]
    pxor      m1, m1

    ; calculate DC
    paddw     m0, [pw_4]
    movd      [r1], m1
    lea       r1, [r0+r2*2]
    movd      m2, [r0]
    movd      m3, [r0+r2]
    movd      m4, [r1]
    movd      m5, [r1+r2]
    psraw     m0, 3
    pshuflw   m0, m0, 0
    punpcklqdq m0, m0
    punpckldq m2, m3
    punpckldq m4, m5
    punpcklbw m2, m1
    punpcklbw m4, m1
    paddw     m2, m0
    paddw     m4, m0
    packuswb  m2, m4
    movd      [r0], m2
    pextrd    [r0+r2], m2, 1
    pextrd    [r1], m2, 2
    pextrd    [r1+r2], m2, 3
    RET

;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------

INIT_MMX
cglobal vp8_idct_dc_add4y_mmx, 3, 3
    ; load data
    movd      m0, [r1+32*0] ; A
    movd      m1, [r1+32*2] ; C
    punpcklwd m0, [r1+32*1] ; A B
    punpcklwd m1, [r1+32*3] ; C D
    punpckldq m0, m1        ; A B C D
    pxor      m6, m6

    ; calculate DC
    paddw     m0, [pw_4]
    movd      [r1+32*0], m6
    movd      [r1+32*1], m6
    movd      [r1+32*2], m6
    movd      [r1+32*3], m6
    psraw     m0, 3
    psubw     m6, m0
    packuswb  m0, m0
    packuswb  m6, m6
    punpcklbw m0, m0        ; AABBCCDD
    punpcklbw m6, m6        ; AABBCCDD
    movq      m1, m0
    movq      m7, m6
    punpcklbw m0, m0        ; AAAABBBB
    punpckhbw m1, m1        ; CCCCDDDD
    punpcklbw m6, m6        ; AAAABBBB
    punpckhbw m7, m7        ; CCCCDDDD

    ; add DC
    lea       r1, [r0+r2*2]
    ADD_DC    m0, m6, 0, mova
    ADD_DC    m1, m7, 8, mova
    RET

INIT_XMM
cglobal vp8_idct_dc_add4y_sse2, 3, 3, 6
    ; load data
    movd      m0, [r1+32*0] ; A
    movd      m1, [r1+32*2] ; C
    punpcklwd m0, [r1+32*1] ; A B
    punpcklwd m1, [r1+32*3] ; C D
    punpckldq m0, m1        ; A B C D
    pxor      m1, m1

    ; calculate DC
    paddw     m0, [pw_4]
    movd      [r1+32*0], m1
    movd      [r1+32*1], m1
    movd      [r1+32*2], m1
    movd      [r1+32*3], m1
    psraw     m0, 3
    psubw     m1, m0
    packuswb  m0, m0
    packuswb  m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1

    ; add DC
    lea       r1, [r0+r2*2]
    ADD_DC    m0, m1, 0, mova
    RET

;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------

INIT_MMX
cglobal vp8_idct_dc_add4uv_mmx, 3, 3
    ; load data
    movd      m0, [r1+32*0] ; A
    movd      m1, [r1+32*2] ; C
    punpcklwd m0, [r1+32*1] ; A B
    punpcklwd m1, [r1+32*3] ; C D
    punpckldq m0, m1        ; A B C D
    pxor      m6, m6

    ; calculate DC
    paddw     m0, [pw_4]
    movd      [r1+32*0], m6
    movd      [r1+32*1], m6
    movd      [r1+32*2], m6
    movd      [r1+32*3], m6
    psraw     m0, 3
    psubw     m6, m0
    packuswb  m0, m0
    packuswb  m6, m6
    punpcklbw m0, m0        ; AABBCCDD
    punpcklbw m6, m6        ; AABBCCDD
    movq      m1, m0
    movq      m7, m6
    punpcklbw m0, m0        ; AAAABBBB
    punpckhbw m1, m1        ; CCCCDDDD
    punpcklbw m6, m6        ; AAAABBBB
    punpckhbw m7, m7        ; CCCCDDDD

    ; add DC
    lea       r1, [r0+r2*2]
    ADD_DC    m0, m6, 0, mova
    lea       r0, [r0+r2*4]
    lea       r1, [r1+r2*4]
    ADD_DC    m1, m7, 0, mova
    RET

;-----------------------------------------------------------------------------
; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------

; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
; this macro assumes that m6/m7 have words for 20091/17734 loaded
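; (20091 is roughly (sqrt(2)*cos(pi/8)-1)*65536, 35468 roughly
; sqrt(2)*sin(pi/8)*65536; since pmulhw yields (a*b)>>16, mul_20091(x) is
; computed as x+pmulhw(x, 20091) and mul_35468(x) as pmulhw(x+x, 17734))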
%macro VP8_MULTIPLY_SUMSUB 4
    mova      %3, %1
    mova      %4, %2
    pmulhw    %3, m6        ; 20091(1)
    pmulhw    %4, m6        ; 20091(2)
    paddw     %3, %1
    paddw     %4, %2
    paddw     %1, %1
    paddw     %2, %2
    pmulhw    %1, m7        ; 35468(1)
    pmulhw    %2, m7        ; 35468(2)
    psubw     %1, %4
    paddw     %2, %3
%endmacro

; calculate x0=%1+%3; x1=%1-%3
; x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
    SUMSUB_BA m%3, m%1, m%5     ; t0, t1
    VP8_MULTIPLY_SUMSUB m%2, m%4, m%5, m%6 ; t2, t3
    SUMSUB_BA m%4, m%3, m%5     ; tmp0, tmp3
    SUMSUB_BA m%2, m%1, m%5     ; tmp1, tmp2
    SWAP %4, %1
    SWAP %4, %3
%endmacro

INIT_MMX
%macro VP8_IDCT_ADD 1
cglobal vp8_idct_add_%1, 3, 3
    ; load block data
    movq      m0, [r1+ 0]
    movq      m1, [r1+ 8]
    movq      m2, [r1+16]
    movq      m3, [r1+24]
    movq      m6, [pw_20091]
    movq      m7, [pw_17734]
%ifidn %1, sse
    xorps     xmm0, xmm0
    movaps    [r1+ 0], xmm0
    movaps    [r1+16], xmm0
%else
    pxor      m4, m4
    movq      [r1+ 0], m4
    movq      [r1+ 8], m4
    movq      [r1+16], m4
    movq      [r1+24], m4
%endif

    ; actual IDCT
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4
    paddw     m0, [pw_4]
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4

    ; store
    pxor      m4, m4
    lea       r1, [r0+2*r2]
    STORE_DIFFx2 m0, m1, m6, m7, m4, 3, r0, r2
    STORE_DIFFx2 m2, m3, m6, m7, m4, 3, r1, r2
    RET
%endmacro

VP8_IDCT_ADD mmx
VP8_IDCT_ADD sse

;-----------------------------------------------------------------------------
; void vp8_luma_dc_wht_mmx(DCTELEM block[4][4][16], DCTELEM dc[16])
;-----------------------------------------------------------------------------
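
; SCATTER_WHT stores the 4+4 output words in m%1/m%2 into the DC coefficient
; (position 0) of eight of the sixteen 16-coefficient luma blocks; %3 is the
; starting block offset (editor's note)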
%macro SCATTER_WHT 3
    movd      r1d, m%1
    movd      r2d, m%2
    mov       [r0+2*16*(0+%3)], r1w
    mov       [r0+2*16*(1+%3)], r2w
    shr       r1d, 16
    shr       r2d, 16
    psrlq     m%1, 32
    psrlq     m%2, 32
    mov       [r0+2*16*(4+%3)], r1w
    mov       [r0+2*16*(5+%3)], r2w
    movd      r1d, m%1
    movd      r2d, m%2
    mov       [r0+2*16*(8+%3)], r1w
    mov       [r0+2*16*(9+%3)], r2w
    shr       r1d, 16
    shr       r2d, 16
    mov       [r0+2*16*(12+%3)], r1w
    mov       [r0+2*16*(13+%3)], r2w
%endmacro
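
; HADAMARD4_1D: one 4-point Hadamard (butterfly) pass over registers
; m%1..m%4, built from two SUMSUB_BADC stages plus a reordering SWAP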
%macro HADAMARD4_1D 4
    SUMSUB_BADC m%2, m%1, m%4, m%3
    SUMSUB_BADC m%4, m%2, m%3, m%1
    SWAP %1, %4, %3
%endmacro

INIT_MMX
cglobal vp8_luma_dc_wht_mmx, 2, 3
    movq      m0, [r1]
    movq      m1, [r1+8]
    movq      m2, [r1+16]
    movq      m3, [r1+24]
    HADAMARD4_1D  0, 1, 2, 3
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    paddw     m0, [pw_3]
    HADAMARD4_1D  0, 1, 2, 3
    psraw     m0, 3
    psraw     m1, 3
    psraw     m2, 3
    psraw     m3, 3
    SCATTER_WHT   0, 1, 0
    SCATTER_WHT   2, 3, 2
    RET

;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
;-----------------------------------------------------------------------------

; macro called with 7 mm register indexes as arguments, and 4 regular registers
;
; first 4 mm registers will carry the transposed pixel data
; the other three are scratch space (one would be sufficient, but this allows
; for more spreading/pipelining and thus faster execution on OOE CPUs)
;
; first two regular registers are buf+4*stride and buf+5*stride
; third is -stride, fourth is +stride
%macro READ_8x4_INTERLEAVED 11
; interleave 8 (A-H) rows of 4 pixels each
    movd      m%1, [%8+%10*4]   ; A0-3
    movd      m%5, [%9+%10*4]   ; B0-3
    movd      m%2, [%8+%10*2]   ; C0-3
    movd      m%6, [%8+%10]     ; D0-3
    movd      m%3, [%8]         ; E0-3
    movd      m%7, [%9]         ; F0-3
    movd      m%4, [%9+%11]     ; G0-3
    punpcklbw m%1, m%5          ; A/B interleaved
    movd      m%5, [%9+%11*2]   ; H0-3
    punpcklbw m%2, m%6          ; C/D interleaved
    punpcklbw m%3, m%7          ; E/F interleaved
    punpcklbw m%4, m%5          ; G/H interleaved
%endmacro

; macro called with 7 mm register indexes as arguments, and 5 regular registers
; first 11 mean the same as READ_8x4_INTERLEAVED above
; fifth regular register is scratch space to reach the bottom 8 rows, it
; will be set to second regular register + 8*stride at the end
%macro READ_16x4_INTERLEAVED 12
; transpose 16 (A-P) rows of 4 pixels each
    lea       %12, [r0+8*r2]

    ; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
    movd      m%1, [%8+%10*4]   ; A0-3
    movd      m%3, [%12+%10*4]  ; I0-3
    movd      m%2, [%8+%10*2]   ; C0-3
    movd      m%4, [%12+%10*2]  ; K0-3
    movd      m%6, [%8+%10]     ; D0-3
    movd      m%5, [%12+%10]    ; L0-3
    movd      m%7, [%12]        ; M0-3
    add       %12, %11
    punpcklbw m%1, m%3          ; A/I
    movd      m%3, [%8]         ; E0-3
    punpcklbw m%2, m%4          ; C/K
    punpcklbw m%6, m%5          ; D/L
    punpcklbw m%3, m%7          ; E/M
    punpcklbw m%2, m%6          ; C/D/K/L interleaved

    ; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
    movd      m%5, [%9+%10*4]   ; B0-3
    movd      m%4, [%12+%10*4]  ; J0-3
    movd      m%7, [%9]         ; F0-3
    movd      m%6, [%12]        ; N0-3
    punpcklbw m%5, m%4          ; B/J
    punpcklbw m%7, m%6          ; F/N
    punpcklbw m%1, m%5          ; A/B/I/J interleaved
    punpcklbw m%3, m%7          ; E/F/M/N interleaved
    movd      m%4, [%9+%11]     ; G0-3
    movd      m%6, [%12+%11]    ; O0-3
    movd      m%5, [%9+%11*2]   ; H0-3
    movd      m%7, [%12+%11*2]  ; P0-3
    punpcklbw m%4, m%6          ; G/O
    punpcklbw m%5, m%7          ; H/P
    punpcklbw m%4, m%5          ; G/H/O/P interleaved
%endmacro

; write 4 mm registers of 2 dwords each
; first four arguments are mm register indexes containing source data
; last four are registers containing buf+4*stride, buf+5*stride,
; -stride and +stride
%macro WRITE_4x2D 8
    ; write out (2 dwords per register)
    movd      [%5+%7*4], m%1
    movd      [%5+%7*2], m%2
    movd      [%5], m%3
    movd      [%6+%8], m%4
    punpckhdq m%1, m%1
    punpckhdq m%2, m%2
    punpckhdq m%3, m%3
    punpckhdq m%4, m%4
    movd      [%6+%7*4], m%1
    movd      [%5+%7], m%2
    movd      [%6], m%3
    movd      [%6+%8*2], m%4
%endmacro

; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_4x2D, but with an extra register, so that the 5 regular
; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
; we add 1*stride to the third regular register in the process
; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
; same memory region), or 8 if they cover two separate buffers (third one points to
; a different memory region than the first two), allowing for more optimal code for
; the 16-width case
%macro WRITE_4x4D 10
    ; write out (4 dwords per register), start with dwords zero
    movd      [%5+%8*4], m%1
    movd      [%5], m%2
    movd      [%7+%8*4], m%3
    movd      [%7], m%4

    ; store dwords 1
    psrldq    m%1, 4
    psrldq    m%2, 4
    psrldq    m%3, 4
    psrldq    m%4, 4
    movd      [%6+%8*4], m%1
    movd      [%6], m%2
%if %10 == 16
    movd      [%6+%9*4], m%3
%endif
    movd      [%7+%9], m%4

    ; write dwords 2
    psrldq    m%1, 4
    psrldq    m%2, 4
%if %10 == 8
    movd      [%5+%8*2], m%1
    movd      %5, m%3
%endif
    psrldq    m%3, 4
    psrldq    m%4, 4
%if %10 == 16
    movd      [%5+%8*2], m%1
%endif
    movd      [%6+%9], m%2
    movd      [%7+%8*2], m%3
    movd      [%7+%9*2], m%4
    add       %7, %9

    ; store dwords 3
    psrldq    m%1, 4
    psrldq    m%2, 4
    psrldq    m%3, 4
    psrldq    m%4, 4
%if %10 == 8
    mov       [%7+%8*4], %5d
    movd      [%6+%8*2], m%1
%else
    movd      [%5+%8], m%1
%endif
    movd      [%6+%9*2], m%2
    movd      [%7+%8*2], m%3
    movd      [%7+%9*2], m%4
%endmacro
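
; SPLATB_REG_*: broadcast the low byte of GPR %2 into every byte lane of
; register %1; the third argument is used only by the SSSE3 version, which
; expects a zeroed register to act as the all-zero pshufb broadcast mask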
%macro SPLATB_REG_MMX 2-3
    movd      %1, %2
    punpcklbw %1, %1
    punpcklwd %1, %1
    punpckldq %1, %1
%endmacro

%macro SPLATB_REG_MMXEXT 2-3
    movd      %1, %2
    punpcklbw %1, %1
    pshufw    %1, %1, 0x0
%endmacro

%macro SPLATB_REG_SSE2 2-3
    movd      %1, %2
    punpcklbw %1, %1
    pshuflw   %1, %1, 0x0
    punpcklqdq %1, %1
%endmacro

%macro SPLATB_REG_SSSE3 3
    movd      %1, %2
    pshufb    %1, %3
%endmacro
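
; SIMPLE_LOOPFILTER: %1 = instruction set suffix, %2 = direction (h or v),
; %3 = number of general-purpose registers for cglobal (editor's note,
; inferred from the invocations after the macro)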
%macro SIMPLE_LOOPFILTER 3
cglobal vp8_%2_loop_filter_simple_%1, 3, %3
%ifidn %2, h
    mov       r5, rsp           ; backup stack pointer
    and       rsp, ~(mmsize-1)  ; align stack
%endif
%if mmsize == 8 ; mmx/mmxext
    mov       r3, 2
%endif
%ifnidn %1, sse2
%if mmsize == 16
    pxor      m0, m0
%endif
%endif
    SPLATB_REG m7, r2, m0       ; splat "flim" into register

    ; set up indexes to address 4 rows
    mov       r2, r1
    neg       r1
%ifidn %2, h
    lea       r0, [r0+4*r2-2]
    sub       rsp, mmsize*2     ; (aligned) storage space for saving p1/q1
%endif

%if mmsize == 8 ; mmx/mmxext
.next8px
%endif
%ifidn %2, v
    ; read 4 half/full rows of pixels
    mova      m0, [r0+r1*2]     ; p1
    mova      m1, [r0+r1]       ; p0
    mova      m2, [r0]          ; q0
    mova      m3, [r0+r2]       ; q1
%else ; h
    lea       r4, [r0+r2]
%if mmsize == 8 ; mmx/mmxext
    READ_8x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2
%else ; sse2
    READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2, r3
%endif
    TRANSPOSE4x4W 0, 1, 2, 3, 4

    mova      [rsp], m0         ; store p1
    mova      [rsp+mmsize], m3  ; store q1
%endif

    ; simple_limit
    mova      m5, m2            ; m5=backup of q0
    mova      m6, m1            ; m6=backup of p0
    psubusb   m1, m2            ; p0-q0
    psubusb   m2, m6            ; q0-p0
    por       m1, m2            ; FFABS(p0-q0)
    paddusb   m1, m1            ; m1=FFABS(p0-q0)*2

    mova      m4, m3
    mova      m2, m0
    psubusb   m3, m0            ; q1-p1
    psubusb   m0, m4            ; p1-q1
    por       m3, m0            ; FFABS(p1-q1)
    mova      m0, [pb_80]
    pxor      m2, m0
    pxor      m4, m0
    psubsb    m2, m4            ; m2=p1-q1 (signed) backup for below
    pand      m3, [pb_FE]
    psrlq     m3, 1             ; m3=FFABS(p1-q1)/2, this can be used signed
    paddusb   m3, m1
    psubusb   m3, m7
    pxor      m1, m1
    pcmpeqb   m3, m1            ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)

    ; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
    mova      m4, m5
    pxor      m5, m0
    pxor      m0, m6
    psubsb    m5, m0            ; q0-p0 (signed)
    paddsb    m2, m5
    paddsb    m2, m5
    paddsb    m2, m5            ; a=(p1-q1) + 3*(q0-p0)
    pand      m2, m3            ; apply filter mask (m3)
    mova      m3, [pb_F8]
    mova      m1, m2
    paddsb    m2, [pb_4]        ; f1<<3=a+4
    paddsb    m1, [pb_3]        ; f2<<3=a+3
    pand      m2, m3
    pand      m1, m3            ; cache f2<<3

    pxor      m0, m0
    pxor      m3, m3
    pcmpgtb   m0, m2            ; which values are <0?
    psubb     m3, m2            ; -f1<<3
    psrlq     m2, 3             ; +f1
    psrlq     m3, 3             ; -f1
    pand      m3, m0
    pandn     m0, m2
    psubusb   m4, m0
    paddusb   m4, m3            ; q0-f1

    pxor      m0, m0
    pxor      m3, m3
    pcmpgtb   m0, m1            ; which values are <0?
    psubb     m3, m1            ; -f2<<3
    psrlq     m1, 3             ; +f2
    psrlq     m3, 3             ; -f2
    pand      m3, m0
    pandn     m0, m1
    paddusb   m6, m0
    psubusb   m6, m3            ; p0+f2

    ; store
%ifidn %2, v
    mova      [r0], m4
    mova      [r0+r1], m6
%else ; h
    mova      m0, [rsp]         ; p1
    SWAP 2, 4                   ; p0
    SWAP 1, 6                   ; q0
    mova      m3, [rsp+mmsize]  ; q1

    TRANSPOSE4x4B 0, 1, 2, 3, 4
%if mmsize == 16 ; sse2
    add       r3, r1            ; change from r4*8*stride to r0+8*stride
    WRITE_4x4D 0, 1, 2, 3, r0, r4, r3, r1, r2, 16
%else ; mmx/mmxext
    WRITE_4x2D 0, 1, 2, 3, r0, r4, r1, r2
%endif
%endif

%if mmsize == 8 ; mmx/mmxext
    ; next 8 pixels
%ifidn %2, v
    add       r0, 8             ; advance 8 cols = pixels
%else ; h
    lea       r0, [r0+r2*8]     ; advance 8 rows = lines
%endif
    dec       r3
    jg .next8px
%ifidn %2, v
    REP_RET
%else ; h
    mov       rsp, r5           ; restore stack pointer
    RET
%endif
%else ; sse2
%ifidn %2, h
    mov       rsp, r5           ; restore stack pointer
%endif
    RET
%endif
%endmacro

INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
SIMPLE_LOOPFILTER mmx, v, 4
SIMPLE_LOOPFILTER mmx, h, 6
%define SPLATB_REG SPLATB_REG_MMXEXT
SIMPLE_LOOPFILTER mmxext, v, 4
SIMPLE_LOOPFILTER mmxext, h, 6
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
SIMPLE_LOOPFILTER sse2, v, 3
SIMPLE_LOOPFILTER sse2, h, 6
%define SPLATB_REG SPLATB_REG_SSSE3
SIMPLE_LOOPFILTER ssse3, v, 3
SIMPLE_LOOPFILTER ssse3, h, 6

;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                            int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
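
; INNER_LOOPFILTER: %1 = instruction set suffix, %2 = direction (h or v),
; %3 = gp register count for cglobal, %4 = block size (16 = luma edge,
; 8 = chroma edge), %5 = xmm register count (editor's note, inferred from
; the invocations after the macro)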
  1414. %macro INNER_LOOPFILTER 5
  1415. %if %4 == 8 ; chroma
  1416. cglobal vp8_%2_loop_filter8uv_inner_%1, 6, %3, %5
  1417. %define dst8_reg r1
  1418. %define mstride_reg r2
  1419. %define E_reg r3
  1420. %define I_reg r4
  1421. %define hev_thr_reg r5
  1422. %else ; luma
  1423. cglobal vp8_%2_loop_filter16y_inner_%1, 5, %3, %5
  1424. %define mstride_reg r1
  1425. %define E_reg r2
  1426. %define I_reg r3
  1427. %define hev_thr_reg r4
  1428. %ifdef m8 ; x86-64, sse2
  1429. %define dst8_reg r4
  1430. %elif mmsize == 16 ; x86-32, sse2
  1431. %define dst8_reg r5
  1432. %else ; x86-32, mmx/mmxext
  1433. %define cnt_reg r5
  1434. %endif
  1435. %endif
  1436. %define dst_reg r0
  1437. %define stride_reg E_reg
  1438. %define dst2_reg I_reg
  1439. %ifndef m8
  1440. %define stack_reg hev_thr_reg
  1441. %endif
  1442. %ifnidn %1, sse2
  1443. %if mmsize == 16
  1444. pxor m7, m7
  1445. %endif
  1446. %endif
  1447. %ifndef m8 ; mmx/mmxext or sse2 on x86-32
  1448. ; splat function arguments
  1449. SPLATB_REG m0, E_reg, m7 ; E
  1450. SPLATB_REG m1, I_reg, m7 ; I
  1451. SPLATB_REG m2, hev_thr_reg, m7 ; hev_thresh
  1452. ; align stack
  1453. mov stack_reg, rsp ; backup stack pointer
  1454. and rsp, ~(mmsize-1) ; align stack
  1455. %ifidn %2, v
  1456. sub rsp, mmsize * 4 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
  1457. ; [3]=hev() result
  1458. %else ; h
  1459. sub rsp, mmsize * 5 ; extra storage space for transposes
  1460. %endif
  1461. %define flim_E [rsp]
  1462. %define flim_I [rsp+mmsize]
  1463. %define hev_thr [rsp+mmsize*2]
  1464. %define mask_res [rsp+mmsize*3]
  1465. %define p0backup [rsp+mmsize*3]
  1466. %define q0backup [rsp+mmsize*4]
  1467. mova flim_E, m0
  1468. mova flim_I, m1
  1469. mova hev_thr, m2
  1470. %else ; sse2 on x86-64
  1471. %define flim_E m9
  1472. %define flim_I m10
  1473. %define hev_thr m11
  1474. %define mask_res m12
  1475. %define p0backup m12
  1476. %define q0backup m8
  1477. ; splat function arguments
  1478. SPLATB_REG flim_E, E_reg, m7 ; E
  1479. SPLATB_REG flim_I, I_reg, m7 ; I
  1480. SPLATB_REG hev_thr, hev_thr_reg, m7 ; hev_thresh
  1481. %endif
  1482. %if mmsize == 8 && %4 == 16 ; mmx/mmxext
  1483. mov cnt_reg, 2
  1484. %endif
  1485. mov stride_reg, mstride_reg
  1486. neg mstride_reg
  1487. %ifidn %2, h
  1488. lea dst_reg, [dst_reg + stride_reg*4-4]
  1489. %if %4 == 8
  1490. lea dst8_reg, [dst8_reg+ stride_reg*4-4]
  1491. %endif
  1492. %endif
  1493. %if mmsize == 8
  1494. .next8px
  1495. %endif
  1496. ; read
  1497. lea dst2_reg, [dst_reg + stride_reg]
  1498. %ifidn %2, v
  1499. %if %4 == 8 && mmsize == 16
  1500. %define movrow movh
  1501. %else
  1502. %define movrow mova
  1503. %endif
  1504. movrow m0, [dst_reg +mstride_reg*4] ; p3
  1505. movrow m1, [dst2_reg+mstride_reg*4] ; p2
  1506. movrow m2, [dst_reg +mstride_reg*2] ; p1
  1507. movrow m5, [dst2_reg] ; q1
  1508. movrow m6, [dst2_reg+ stride_reg] ; q2
  1509. movrow m7, [dst2_reg+ stride_reg*2] ; q3
  1510. %if mmsize == 16 && %4 == 8
  1511. movhps m0, [dst8_reg+mstride_reg*4]
  1512. movhps m2, [dst8_reg+mstride_reg*2]
  1513. add dst8_reg, stride_reg
  1514. movhps m1, [dst8_reg+mstride_reg*4]
  1515. movhps m5, [dst8_reg]
  1516. movhps m6, [dst8_reg+ stride_reg]
  1517. movhps m7, [dst8_reg+ stride_reg*2]
  1518. add dst8_reg, mstride_reg
  1519. %endif
  1520. %elif mmsize == 8 ; mmx/mmxext (h)
  1521. ; read 8 rows of 8px each
  1522. movu m0, [dst_reg +mstride_reg*4]
  1523. movu m1, [dst2_reg+mstride_reg*4]
  1524. movu m2, [dst_reg +mstride_reg*2]
  1525. movu m3, [dst_reg +mstride_reg]
  1526. movu m4, [dst_reg]
  1527. movu m5, [dst2_reg]
  1528. movu m6, [dst2_reg+ stride_reg]
  1529. ; 8x8 transpose
  1530. TRANSPOSE4x4B 0, 1, 2, 3, 7
  1531. mova q0backup, m1
  1532. movu m7, [dst2_reg+ stride_reg*2]
  1533. TRANSPOSE4x4B 4, 5, 6, 7, 1
  1534. SBUTTERFLY dq, 0, 4, 1 ; p3/p2
  1535. SBUTTERFLY dq, 2, 6, 1 ; q0/q1
  1536. SBUTTERFLY dq, 3, 7, 1 ; q2/q3
  1537. mova m1, q0backup
  1538. mova q0backup, m2 ; store q0
  1539. SBUTTERFLY dq, 1, 5, 2 ; p1/p0
  1540. mova p0backup, m5 ; store p0
  1541. SWAP 1, 4
  1542. SWAP 2, 4
  1543. SWAP 6, 3
  1544. SWAP 5, 3
  1545. %else ; sse2 (h)
  1546. %if %4 == 16
  1547. lea dst8_reg, [dst_reg + stride_reg*8]
  1548. %endif
  1549. ; read 16 rows of 8px each, interleave
  1550. movh m0, [dst_reg +mstride_reg*4]
  1551. movh m1, [dst8_reg+mstride_reg*4]
  1552. movh m2, [dst_reg +mstride_reg*2]
  1553. movh m5, [dst8_reg+mstride_reg*2]
  1554. movh m3, [dst_reg +mstride_reg]
  1555. movh m6, [dst8_reg+mstride_reg]
  1556. movh m4, [dst_reg]
  1557. movh m7, [dst8_reg]
  1558. punpcklbw m0, m1 ; A/I
  1559. punpcklbw m2, m5 ; C/K
  1560. punpcklbw m3, m6 ; D/L
  1561. punpcklbw m4, m7 ; E/M
  1562. add dst8_reg, stride_reg
  1563. movh m1, [dst2_reg+mstride_reg*4]
  1564. movh m6, [dst8_reg+mstride_reg*4]
  1565. movh m5, [dst2_reg]
  1566. movh m7, [dst8_reg]
  1567. punpcklbw m1, m6 ; B/J
  1568. punpcklbw m5, m7 ; F/N
  1569. movh m6, [dst2_reg+ stride_reg]
  1570. movh m7, [dst8_reg+ stride_reg]
  1571. punpcklbw m6, m7 ; G/O
  1572. ; 8x16 transpose
  1573. TRANSPOSE4x4B 0, 1, 2, 3, 7
  1574. %ifdef m8
  1575. SWAP 1, 8
  1576. %else
  1577. mova q0backup, m1
  1578. %endif
  1579. movh m7, [dst2_reg+ stride_reg*2]
  1580. movh m1, [dst8_reg+ stride_reg*2]
  1581. punpcklbw m7, m1 ; H/P
  1582. TRANSPOSE4x4B 4, 5, 6, 7, 1
  1583. SBUTTERFLY dq, 0, 4, 1 ; p3/p2
  1584. SBUTTERFLY dq, 2, 6, 1 ; q0/q1
  1585. SBUTTERFLY dq, 3, 7, 1 ; q2/q3
  1586. %ifdef m8
  1587. SWAP 1, 8
  1588. SWAP 2, 8
  1589. %else
  1590. mova m1, q0backup
  1591. mova q0backup, m2 ; store q0
  1592. %endif
  1593. SBUTTERFLY dq, 1, 5, 2 ; p1/p0
  1594. %ifdef m12
  1595. SWAP 5, 12
  1596. %else
  1597. mova p0backup, m5 ; store p0
  1598. %endif
  1599. SWAP 1, 4
  1600. SWAP 2, 4
  1601. SWAP 6, 3
  1602. SWAP 5, 3
  1603. %endif
  1604. ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
  1605. mova m4, m1
  1606. SWAP 4, 1
  1607. psubusb m4, m0 ; p2-p3
  1608. psubusb m0, m1 ; p3-p2
  1609. por m0, m4 ; abs(p3-p2)
  1610. mova m4, m2
  1611. SWAP 4, 2
  1612. psubusb m4, m1 ; p1-p2
  1613. psubusb m1, m2 ; p2-p1
  1614. por m1, m4 ; abs(p2-p1)
  1615. mova m4, m6
  1616. SWAP 4, 6
  1617. psubusb m4, m7 ; q2-q3
  1618. psubusb m7, m6 ; q3-q2
  1619. por m7, m4 ; abs(q3-q2)
  1620. mova m4, m5
  1621. SWAP 4, 5
  1622. psubusb m4, m6 ; q1-q2
  1623. psubusb m6, m5 ; q2-q1
  1624. por m6, m4 ; abs(q2-q1)
  1625. %ifidn %1, mmx
  1626. mova m4, flim_I
  1627. pxor m3, m3
  1628. psubusb m0, m4
  1629. psubusb m1, m4
  1630. psubusb m7, m4
  1631. psubusb m6, m4
  1632. pcmpeqb m0, m3 ; abs(p3-p2) <= I
  1633. pcmpeqb m1, m3 ; abs(p2-p1) <= I
  1634. pcmpeqb m7, m3 ; abs(q3-q2) <= I
  1635. pcmpeqb m6, m3 ; abs(q2-q1) <= I
  1636. pand m0, m1
  1637. pand m7, m6
  1638. pand m0, m7
  1639. %else ; mmxext/sse2
  1640. pmaxub m0, m1
  1641. pmaxub m6, m7
  1642. pmaxub m0, m6
  1643. %endif
  1644. ; normal_limit and high_edge_variance for p1-p0, q1-q0
  1645. SWAP 7, 3 ; now m7 is zero
  1646. %ifidn %2, v
  1647. movrow m3, [dst_reg +mstride_reg] ; p0
  1648. %if mmsize == 16 && %4 == 8
  1649. movhps m3, [dst8_reg+mstride_reg]
  1650. %endif
  1651. %elifdef m12
  1652. SWAP 3, 12
  1653. %else
  1654. mova m3, p0backup
  1655. %endif
  1656. mova m1, m2
  1657. SWAP 1, 2
  1658. mova m6, m3
  1659. SWAP 3, 6
  1660. psubusb m1, m3 ; p1-p0
  1661. psubusb m6, m2 ; p0-p1
  1662. por m1, m6 ; abs(p1-p0)
  1663. %ifidn %1, mmx
  1664. mova m6, m1
  1665. psubusb m1, m4
  1666. psubusb m6, hev_thr
  1667. pcmpeqb m1, m7 ; abs(p1-p0) <= I
  1668. pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
  1669. pand m0, m1
  1670. mova mask_res, m6
  1671. %else ; mmxext/sse2
  1672. pmaxub m0, m1 ; max_I
  1673. SWAP 1, 4 ; max_hev_thresh
  1674. %endif
  1675. SWAP 6, 4 ; now m6 is I
  1676. %ifidn %2, v
  1677. movrow m4, [dst_reg] ; q0
  1678. %if mmsize == 16 && %4 == 8
  1679. movhps m4, [dst8_reg]
  1680. %endif
  1681. %elifdef m8
  1682. SWAP 4, 8
  1683. %else
  1684. mova m4, q0backup
  1685. %endif
  1686. mova m1, m4
  1687. SWAP 1, 4
  1688. mova m7, m5
  1689. SWAP 7, 5
  1690. psubusb m1, m5 ; q0-q1
  1691. psubusb m7, m4 ; q1-q0
  1692. por m1, m7 ; abs(q1-q0)
  1693. %ifidn %1, mmx
  1694. mova m7, m1
  1695. psubusb m1, m6
  1696. psubusb m7, hev_thr
  1697. pxor m6, m6
  1698. pcmpeqb m1, m6 ; abs(q1-q0) <= I
  1699. pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
  1700. mova m6, mask_res
  1701. pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
  1702. pand m6, m7
  1703. %else ; mmxext/sse2
  1704. pxor m7, m7
  1705. pmaxub m0, m1
  1706. pmaxub m6, m1
  1707. psubusb m0, flim_I
  1708. psubusb m6, hev_thr
  1709. pcmpeqb m0, m7 ; max(abs(..)) <= I
  1710. pcmpeqb m6, m7 ; !(max(abs..) > thresh)
  1711. %endif
  1712. %ifdef m12
  1713. SWAP 6, 12
  1714. %else
  1715. mova mask_res, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
  1716. %endif
  1717. ; simple_limit
  1718. mova m1, m3
  1719. SWAP 1, 3
  1720. mova m6, m4 ; keep copies of p0/q0 around for later use
  1721. SWAP 6, 4
  1722. psubusb m1, m4 ; p0-q0
  1723. psubusb m6, m3 ; q0-p0
  1724. por m1, m6 ; abs(q0-p0)
  1725. paddusb m1, m1 ; m1=2*abs(q0-p0)
  1726. mova m7, m2
  1727. SWAP 7, 2
  1728. mova m6, m5
  1729. SWAP 6, 5
  1730. psubusb m7, m5 ; p1-q1
  1731. psubusb m6, m2 ; q1-p1
  1732. por m7, m6 ; abs(q1-p1)
  1733. pxor m6, m6
  1734. pand m7, [pb_FE]
  1735. psrlq m7, 1 ; abs(q1-p1)/2
  1736. paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
  1737. psubusb m7, flim_E
  1738. pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
  1739. pand m0, m7 ; normal_limit result
  1740. ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
  1741. %ifdef m8 ; x86-64 && sse2
  1742. mova m8, [pb_80]
  1743. %define pb_80_var m8
  1744. %else ; x86-32 or mmx/mmxext
  1745. %define pb_80_var [pb_80]
  1746. %endif
  1747. mova m1, m4
  1748. mova m7, m3
  1749. pxor m1, pb_80_var
  1750. pxor m7, pb_80_var
  1751. psubsb m1, m7 ; (signed) q0-p0
  1752. mova m6, m2
  1753. mova m7, m5
  1754. pxor m6, pb_80_var
  1755. pxor m7, pb_80_var
  1756. psubsb m6, m7 ; (signed) p1-q1
  1757. mova m7, mask_res
  1758. pandn m7, m6
  1759. paddsb m7, m1
  1760. paddsb m7, m1
  1761. paddsb m7, m1 ; 3*(q0-p0)+is4tap?(p1-q1)
  1762. pand m7, m0
  1763. mova m1, [pb_F8]
  1764. mova m6, m7
  1765. paddsb m7, [pb_3]
  1766. paddsb m6, [pb_4]
  1767. pand m7, m1
  1768. pand m6, m1
  1769. pxor m1, m1
  1770. pxor m0, m0
  1771. pcmpgtb m1, m7
  1772. psubb m0, m7
  1773. psrlq m7, 3 ; +f2
  1774. psrlq m0, 3 ; -f2
  1775. pand m0, m1
  1776. pandn m1, m7
  1777. psubusb m3, m0
  1778. paddusb m3, m1 ; p0+f2
  1779. pxor m1, m1
  1780. pxor m0, m0
  1781. pcmpgtb m0, m6
  1782. psubb m1, m6
  1783. psrlq m6, 3 ; +f1
  1784. psrlq m1, 3 ; -f1
  1785. pand m1, m0
  1786. pandn m0, m6
  1787. psubusb m4, m0
  1788. paddusb m4, m1 ; q0-f1
  1789. %ifdef m12
  1790. SWAP 6, 12
  1791. %else
  1792. mova m6, mask_res
  1793. %endif
  1794. %ifidn %1, mmx
  1795. mova m7, [pb_1]
  1796. %else ; mmxext/sse2
  1797. pxor m7, m7
  1798. %endif
  1799. pand m0, m6
  1800. pand m1, m6
  1801. %ifidn %1, mmx
  1802. paddusb m0, m7
  1803. pand m1, [pb_FE]
  1804. pandn m7, m0
  1805. psrlq m1, 1
  1806. psrlq m7, 1
  1807. SWAP 0, 7
  1808. %else ; mmxext/sse2
  1809. psubusb m1, [pb_1]
  1810. pavgb m0, m7 ; a
  1811. pavgb m1, m7 ; -a
  1812. %endif
  1813. psubusb m5, m0
  1814. psubusb m2, m1
  1815. paddusb m5, m1 ; q1-a
  1816. paddusb m2, m0 ; p1+a
  1817. ; store
  1818. %ifidn %2, v
  1819. movrow [dst_reg +mstride_reg*2], m2
  1820. movrow [dst_reg +mstride_reg ], m3
  1821. movrow [dst_reg], m4
  1822. movrow [dst_reg + stride_reg ], m5
  1823. %if mmsize == 16 && %4 == 8
  1824. movhps [dst8_reg+mstride_reg*2], m2
  1825. movhps [dst8_reg+mstride_reg ], m3
  1826. movhps [dst8_reg], m4
  1827. movhps [dst8_reg+ stride_reg ], m5
  1828. %endif
  1829. %else ; h
  1830. add dst_reg, 2
  1831. add dst2_reg, 2
  1832. ; 4x8/16 transpose
  1833. TRANSPOSE4x4B 2, 3, 4, 5, 6
  1834. %if mmsize == 8 ; mmx/mmxext (h)
  1835. WRITE_4x2D 2, 3, 4, 5, dst_reg, dst2_reg, mstride_reg, stride_reg
  1836. %else ; sse2 (h)
  1837. lea dst8_reg, [dst8_reg+mstride_reg+2]
  1838. WRITE_4x4D 2, 3, 4, 5, dst_reg, dst2_reg, dst8_reg, mstride_reg, stride_reg, %4
  1839. %endif
  1840. %endif
  1841. %if mmsize == 8
  1842. %if %4 == 8 ; chroma
  1843. %ifidn %2, h
  1844. sub dst_reg, 2
  1845. %endif
  1846. cmp dst_reg, dst8_reg
  1847. mov dst_reg, dst8_reg
  1848. jnz .next8px
  1849. %else
  1850. %ifidn %2, h
  1851. lea dst_reg, [dst_reg + stride_reg*8-2]
  1852. %else ; v
  1853. add dst_reg, 8
  1854. %endif
  1855. dec cnt_reg
  1856. jg .next8px
  1857. %endif
  1858. %endif
  1859. %ifndef m8 ; sse2 on x86-32 or mmx/mmxext
  1860. mov rsp, stack_reg ; restore stack pointer
  1861. %endif
  1862. RET
  1863. %endmacro
  1864. INIT_MMX
  1865. %define SPLATB_REG SPLATB_REG_MMX
  1866. INNER_LOOPFILTER mmx, v, 6, 16, 0
  1867. INNER_LOOPFILTER mmx, h, 6, 16, 0
  1868. INNER_LOOPFILTER mmx, v, 6, 8, 0
  1869. INNER_LOOPFILTER mmx, h, 6, 8, 0
  1870. %define SPLATB_REG SPLATB_REG_MMXEXT
  1871. INNER_LOOPFILTER mmxext, v, 6, 16, 0
  1872. INNER_LOOPFILTER mmxext, h, 6, 16, 0
  1873. INNER_LOOPFILTER mmxext, v, 6, 8, 0
  1874. INNER_LOOPFILTER mmxext, h, 6, 8, 0
  1875. INIT_XMM
  1876. %define SPLATB_REG SPLATB_REG_SSE2
  1877. INNER_LOOPFILTER sse2, v, 5, 16, 13
  1878. %ifdef m8
  1879. INNER_LOOPFILTER sse2, h, 5, 16, 13
  1880. %else
  1881. INNER_LOOPFILTER sse2, h, 6, 16, 13
  1882. %endif
  1883. INNER_LOOPFILTER sse2, v, 6, 8, 13
  1884. INNER_LOOPFILTER sse2, h, 6, 8, 13
  1885. %define SPLATB_REG SPLATB_REG_SSSE3
  1886. INNER_LOOPFILTER ssse3, v, 5, 16, 13
  1887. %ifdef m8
  1888. INNER_LOOPFILTER ssse3, h, 5, 16, 13
  1889. %else
  1890. INNER_LOOPFILTER ssse3, h, 6, 16, 13
  1891. %endif
  1892. INNER_LOOPFILTER ssse3, v, 6, 8, 13
  1893. INNER_LOOPFILTER ssse3, h, 6, 8, 13
  1894. ;-----------------------------------------------------------------------------
  1895. ; void vp8_h/v_loop_filter<size>_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
  1896. ; int flimE, int flimI, int hev_thr);
  1897. ;-----------------------------------------------------------------------------
; write 4 or 8 words in the mmx/xmm registers as 8 lines
; 1 and 2 are the registers to write; they can be the same (for SSE2)
; for pre-SSE4:
; 3 is a general-purpose register that we will clobber
; for SSE4:
; 3 is a pointer to the destination's 5th line
; 4 is a pointer to the destination's 4th line
; 5/6 are -stride and +stride
%macro WRITE_2x4W 6
    movd      %3, %1
    punpckhdq %1, %1
    mov       [%4+%5*4], %3w
    shr       %3, 16
    add       %4, %6
    mov       [%4+%5*4], %3w
    movd      %3, %1
    add       %4, %5
    mov       [%4+%5*2], %3w
    shr       %3, 16
    mov       [%4+%5  ], %3w
    movd      %3, %2
    punpckhdq %2, %2
    mov       [%4     ], %3w
    shr       %3, 16
    mov       [%4+%6  ], %3w
    movd      %3, %2
    add       %4, %6
    mov       [%4+%6  ], %3w
    shr       %3, 16
    mov       [%4+%6*2], %3w
    add       %4, %5
%endmacro

%macro WRITE_8W_SSE2 5
    movd      %2, %1
    psrldq    %1, 4
    mov       [%3+%4*4], %2w
    shr       %2, 16
    add       %3, %5
    mov       [%3+%4*4], %2w
    movd      %2, %1
    psrldq    %1, 4
    add       %3, %4
    mov       [%3+%4*2], %2w
    shr       %2, 16
    mov       [%3+%4  ], %2w
    movd      %2, %1
    psrldq    %1, 4
    mov       [%3     ], %2w
    shr       %2, 16
    mov       [%3+%5  ], %2w
    movd      %2, %1
    add       %3, %5
    mov       [%3+%5  ], %2w
    shr       %2, 16
    mov       [%3+%5*2], %2w
%endmacro

%macro WRITE_8W_SSE4 5
    pextrw    [%3+%4*4], %1, 0
    pextrw    [%2+%4*4], %1, 1
    pextrw    [%3+%4*2], %1, 2
    pextrw    [%3+%4  ], %1, 3
    pextrw    [%3     ], %1, 4
    pextrw    [%2     ], %1, 5
    pextrw    [%2+%5  ], %1, 6
    pextrw    [%2+%5*2], %1, 7
%endmacro
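; Two variants of the 8-word column store: WRITE_8W_SSE2 extracts words via
; movd/psrldq and 16-bit GPR stores, while WRITE_8W_SSE4 writes straight to
; memory with pextrw, whose memory-destination form requires SSE4.1; this is
; why separate sse4 instantiations exist at the end of this file.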
%macro MBEDGE_LOOPFILTER 5
%if %4 == 8 ; chroma
cglobal vp8_%2_loop_filter8uv_mbedge_%1, 6, %3, %5
%define dst8_reg    r1
%define mstride_reg r2
%define E_reg       r3
%define I_reg       r4
%define hev_thr_reg r5
%else ; luma
cglobal vp8_%2_loop_filter16y_mbedge_%1, 5, %3, %5
%define mstride_reg r1
%define E_reg       r2
%define I_reg       r3
%define hev_thr_reg r4
%ifdef m8 ; x86-64, sse2
%define dst8_reg    r4
%elif mmsize == 16 ; x86-32, sse2
%define dst8_reg    r5
%else ; x86-32, mmx/mmxext
%define cnt_reg     r5
%endif
%endif
%define dst_reg     r0
%define stride_reg  E_reg
%define dst2_reg    I_reg
%ifndef m8
%define stack_reg   hev_thr_reg
%endif

%ifnidn %1, sse2
%if mmsize == 16
    pxor      m7, m7
%endif
%endif

%ifndef m8 ; mmx/mmxext or sse2 on x86-32
    ; splat function arguments
    SPLATB_REG m0, E_reg, m7       ; E
    SPLATB_REG m1, I_reg, m7       ; I
    SPLATB_REG m2, hev_thr_reg, m7 ; hev_thresh

    ; align stack
    mov       stack_reg, rsp       ; backup stack pointer
    and       rsp, ~(mmsize-1)     ; align stack
%if mmsize == 16
    sub       rsp, mmsize * 7
%else
    sub       rsp, mmsize * 8      ; stack layout: [0]=E, [1]=I, [2]=hev_thr
                                   ;               [3]=hev() result
                                   ;               [4]=filter tmp result
                                   ;               [5]/[6] = p2/q2 backup
                                   ;               [7]=lim_res sign result
%endif

%define flim_E   [rsp]
%define flim_I   [rsp+mmsize]
%define hev_thr  [rsp+mmsize*2]
%define mask_res [rsp+mmsize*3]
%define lim_res  [rsp+mmsize*4]
%define p0backup [rsp+mmsize*3]
%define q0backup [rsp+mmsize*4]
%define p2backup [rsp+mmsize*5]
%define q2backup [rsp+mmsize*6]
%if mmsize == 16
%define lim_sign [rsp]
%else
%define lim_sign [rsp+mmsize*7]
%endif
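; Note the deliberate aliasing of stack slots: mask_res/p0backup and
; lim_res/q0backup share storage because the transpose backups are read
; back before the mask/limit results are written, and lim_sign can reuse
; flim_E's slot in the sse2 case since E is dead once simple_limit is done.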
    mova      flim_E, m0
    mova      flim_I, m1
    mova      hev_thr, m2
%else ; sse2 on x86-64
%define flim_E   m9
%define flim_I   m10
%define hev_thr  m11
%define mask_res m12
%define lim_res  m8
%define p0backup m12
%define q0backup m8
%define p2backup m13
%define q2backup m14
%define lim_sign m9

    ; splat function arguments
    SPLATB_REG flim_E, E_reg, m7        ; E
    SPLATB_REG flim_I, I_reg, m7        ; I
    SPLATB_REG hev_thr, hev_thr_reg, m7 ; hev_thresh
%endif

%if mmsize == 8 && %4 == 16 ; mmx/mmxext
    mov       cnt_reg, 2
%endif
    mov       stride_reg, mstride_reg
    neg       mstride_reg
%ifidn %2, h
    lea       dst_reg, [dst_reg + stride_reg*4-4]
%if %4 == 8
    lea       dst8_reg, [dst8_reg+ stride_reg*4-4]
%endif
%endif

%if mmsize == 8
.next8px
%endif
    ; read
    lea       dst2_reg, [dst_reg + stride_reg]
%ifidn %2, v
%if %4 == 8 && mmsize == 16
%define movrow movh
%else
%define movrow mova
%endif
    movrow    m0, [dst_reg +mstride_reg*4] ; p3
    movrow    m1, [dst2_reg+mstride_reg*4] ; p2
    movrow    m2, [dst_reg +mstride_reg*2] ; p1
    movrow    m5, [dst2_reg]               ; q1
    movrow    m6, [dst2_reg+ stride_reg]   ; q2
    movrow    m7, [dst2_reg+ stride_reg*2] ; q3
%if mmsize == 16 && %4 == 8
    movhps    m0, [dst8_reg+mstride_reg*4]
    movhps    m2, [dst8_reg+mstride_reg*2]
    add       dst8_reg, stride_reg
    movhps    m1, [dst8_reg+mstride_reg*4]
    movhps    m5, [dst8_reg]
    movhps    m6, [dst8_reg+ stride_reg]
    movhps    m7, [dst8_reg+ stride_reg*2]
    add       dst8_reg, mstride_reg
%endif
%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu      m0, [dst_reg +mstride_reg*4]
    movu      m1, [dst2_reg+mstride_reg*4]
    movu      m2, [dst_reg +mstride_reg*2]
    movu      m3, [dst_reg +mstride_reg]
    movu      m4, [dst_reg]
    movu      m5, [dst2_reg]
    movu      m6, [dst2_reg+ stride_reg]

    ; 8x8 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
    mova      q0backup, m1
    movu      m7, [dst2_reg+ stride_reg*2]
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1   ; p3/p2
    SBUTTERFLY dq, 2, 6, 1   ; q0/q1
    SBUTTERFLY dq, 3, 7, 1   ; q2/q3
    mova      m1, q0backup
    mova      q0backup, m2   ; store q0
    SBUTTERFLY dq, 1, 5, 2   ; p1/p0
    mova      p0backup, m5   ; store p0
    SWAP       1, 4
    SWAP       2, 4
    SWAP       6, 3
    SWAP       5, 3
%else ; sse2 (h)
%if %4 == 16
    lea       dst8_reg, [dst_reg + stride_reg*8]
%endif
    ; read 16 rows of 8px each, interleave
    movh      m0, [dst_reg +mstride_reg*4]
    movh      m1, [dst8_reg+mstride_reg*4]
    movh      m2, [dst_reg +mstride_reg*2]
    movh      m5, [dst8_reg+mstride_reg*2]
    movh      m3, [dst_reg +mstride_reg]
    movh      m6, [dst8_reg+mstride_reg]
    movh      m4, [dst_reg]
    movh      m7, [dst8_reg]
    punpcklbw m0, m1         ; A/I
    punpcklbw m2, m5         ; C/K
    punpcklbw m3, m6         ; D/L
    punpcklbw m4, m7         ; E/M
    add       dst8_reg, stride_reg
    movh      m1, [dst2_reg+mstride_reg*4]
    movh      m6, [dst8_reg+mstride_reg*4]
    movh      m5, [dst2_reg]
    movh      m7, [dst8_reg]
    punpcklbw m1, m6         ; B/J
    punpcklbw m5, m7         ; F/N
    movh      m6, [dst2_reg+ stride_reg]
    movh      m7, [dst8_reg+ stride_reg]
    punpcklbw m6, m7         ; G/O

    ; 8x16 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
%ifdef m8
    SWAP       1, 8
%else
    mova      q0backup, m1
%endif
    movh      m7, [dst2_reg+ stride_reg*2]
    movh      m1, [dst8_reg+ stride_reg*2]
    punpcklbw m7, m1         ; H/P
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1   ; p3/p2
    SBUTTERFLY dq, 2, 6, 1   ; q0/q1
    SBUTTERFLY dq, 3, 7, 1   ; q2/q3
%ifdef m8
    SWAP       1, 8
    SWAP       2, 8
%else
    mova      m1, q0backup
    mova      q0backup, m2   ; store q0
%endif
    SBUTTERFLY dq, 1, 5, 2   ; p1/p0
%ifdef m12
    SWAP       5, 12
%else
    mova      p0backup, m5   ; store p0
%endif
    SWAP       1, 4
    SWAP       2, 4
    SWAP       6, 3
    SWAP       5, 3
%endif
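; All three read paths converge on the same layout: m0=p3, m1=p2, m2=p1,
; m5=q1, m6=q2, m7=q3. p0 and q0 are fetched later: straight from memory
; in the v case, from p0backup/q0backup (m12/m8 on x86-64) after the
; transposes in the h case.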
    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
    mova      m4, m1
    SWAP       4, 1
    psubusb   m4, m0         ; p2-p3
    psubusb   m0, m1         ; p3-p2
    por       m0, m4         ; abs(p3-p2)

    mova      m4, m2
    SWAP       4, 2
    psubusb   m4, m1         ; p1-p2
    mova      p2backup, m1
    psubusb   m1, m2         ; p2-p1
    por       m1, m4         ; abs(p2-p1)

    mova      m4, m6
    SWAP       4, 6
    psubusb   m4, m7         ; q2-q3
    psubusb   m7, m6         ; q3-q2
    por       m7, m4         ; abs(q3-q2)

    mova      m4, m5
    SWAP       4, 5
    psubusb   m4, m6         ; q1-q2
    mova      q2backup, m6
    psubusb   m6, m5         ; q2-q1
    por       m6, m4         ; abs(q2-q1)

%ifidn %1, mmx
    mova      m4, flim_I
    pxor      m3, m3
    psubusb   m0, m4
    psubusb   m1, m4
    psubusb   m7, m4
    psubusb   m6, m4
    pcmpeqb   m0, m3         ; abs(p3-p2) <= I
    pcmpeqb   m1, m3         ; abs(p2-p1) <= I
    pcmpeqb   m7, m3         ; abs(q3-q2) <= I
    pcmpeqb   m6, m3         ; abs(q2-q1) <= I
    pand      m0, m1
    pand      m7, m6
    pand      m0, m7
%else ; mmxext/sse2
    pmaxub    m0, m1
    pmaxub    m6, m7
    pmaxub    m0, m6
%endif
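; Plain mmx lacks pmaxub, so each |difference| is compared against I
; individually and the byte masks are ANDed together; mmxext and later
; instead keep a running maximum and defer to a single psubusb/pcmpeqb
; once the p1/p0 and q1/q0 terms have been folded in below.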
    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP       7, 3          ; now m7 is zero
%ifidn %2, v
    movrow    m3, [dst_reg +mstride_reg] ; p0
%if mmsize == 16 && %4 == 8
    movhps    m3, [dst8_reg+mstride_reg]
%endif
%elifdef m12
    SWAP       3, 12
%else
    mova      m3, p0backup
%endif

    mova      m1, m2
    SWAP       1, 2
    mova      m6, m3
    SWAP       3, 6
    psubusb   m1, m3         ; p1-p0
    psubusb   m6, m2         ; p0-p1
    por       m1, m6         ; abs(p1-p0)
%ifidn %1, mmx
    mova      m6, m1
    psubusb   m1, m4
    psubusb   m6, hev_thr
    pcmpeqb   m1, m7         ; abs(p1-p0) <= I
    pcmpeqb   m6, m7         ; abs(p1-p0) <= hev_thresh
    pand      m0, m1
    mova      mask_res, m6
%else ; mmxext/sse2
    pmaxub    m0, m1         ; max_I
    SWAP       1, 4          ; max_hev_thresh
%endif

    SWAP       6, 4          ; now m6 is I
%ifidn %2, v
    movrow    m4, [dst_reg]  ; q0
%if mmsize == 16 && %4 == 8
    movhps    m4, [dst8_reg]
%endif
%elifdef m8
    SWAP       4, 8
%else
    mova      m4, q0backup
%endif
    mova      m1, m4
    SWAP       1, 4
    mova      m7, m5
    SWAP       7, 5
    psubusb   m1, m5         ; q0-q1
    psubusb   m7, m4         ; q1-q0
    por       m1, m7         ; abs(q1-q0)
%ifidn %1, mmx
    mova      m7, m1
    psubusb   m1, m6
    psubusb   m7, hev_thr
    pxor      m6, m6
    pcmpeqb   m1, m6         ; abs(q1-q0) <= I
    pcmpeqb   m7, m6         ; abs(q1-q0) <= hev_thresh
    mova      m6, mask_res
    pand      m0, m1         ; abs([pq][321]-[pq][210]) <= I
    pand      m6, m7
%else ; mmxext/sse2
    pxor      m7, m7
    pmaxub    m0, m1
    pmaxub    m6, m1
    psubusb   m0, flim_I
    psubusb   m6, hev_thr
    pcmpeqb   m0, m7         ; max(abs(..)) <= I
    pcmpeqb   m6, m7         ; !(max(abs..) > thresh)
%endif
%ifdef m12
    SWAP       6, 12
%else
    mova      mask_res, m6   ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
%endif
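; mask_res now holds the inverted hev mask: 0xff where neither |p1-p0| nor
; |q1-q0| exceeds hev_thresh. It later routes each byte to filter_common
; (hev set) or filter_mbedge (hev clear).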
    ; simple_limit
    mova      m1, m3
    SWAP       1, 3
    mova      m6, m4         ; keep copies of p0/q0 around for later use
    SWAP       6, 4
    psubusb   m1, m4         ; p0-q0
    psubusb   m6, m3         ; q0-p0
    por       m1, m6         ; abs(q0-p0)
    paddusb   m1, m1         ; m1=2*abs(q0-p0)

    mova      m7, m2
    SWAP       7, 2
    mova      m6, m5
    SWAP       6, 5
    psubusb   m7, m5         ; p1-q1
    psubusb   m6, m2         ; q1-p1
    por       m7, m6         ; abs(q1-p1)
    pxor      m6, m6
    pand      m7, [pb_FE]
    psrlq     m7, 1          ; abs(q1-p1)/2
    paddusb   m7, m1         ; abs(q0-p0)*2+abs(q1-p1)/2
    psubusb   m7, flim_E
    pcmpeqb   m7, m6         ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand      m0, m7         ; normal_limit result
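; m0 is now the complete filter mask from VP8's normal_limit(): a byte is
; 0xff iff every inner |difference| is <= I and 2*|p0-q0| + |p1-q1|/2 <= E.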
    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
    mova      m8, [pb_80]
%define pb_80_var m8
%else ; x86-32 or mmx/mmxext
%define pb_80_var [pb_80]
%endif
    mova      m1, m4
    mova      m7, m3
    pxor      m1, pb_80_var
    pxor      m7, pb_80_var
    psubsb    m1, m7         ; (signed) q0-p0
    mova      m6, m2
    mova      m7, m5
    pxor      m6, pb_80_var
    pxor      m7, pb_80_var
    psubsb    m6, m7         ; (signed) p1-q1
    mova      m7, mask_res
    paddsb    m6, m1
    paddsb    m6, m1
    paddsb    m6, m1
    pand      m6, m0
%ifdef m8
    mova      lim_res, m6    ; 3*(q0-p0)+(p1-q1) masked for filter_mbedge
    pand      lim_res, m7
%else
    mova      m0, m6
    pand      m0, m7
    mova      lim_res, m0
%endif
    pandn     m7, m6         ; 3*(q0-p0)+(p1-q1) masked for filter_common

    mova      m1, [pb_F8]
    mova      m6, m7
    paddsb    m7, [pb_3]
    paddsb    m6, [pb_4]
    pand      m7, m1
    pand      m6, m1
    pxor      m1, m1
    pxor      m0, m0
    pcmpgtb   m1, m7
    psubb     m0, m7
    psrlq     m7, 3          ; +f2
    psrlq     m0, 3          ; -f2
    pand      m0, m1
    pandn     m1, m7
    psubusb   m3, m0
    paddusb   m3, m1         ; p0+f2
    pxor      m1, m1
    pxor      m0, m0
    pcmpgtb   m0, m6
    psubb     m1, m6
    psrlq     m6, 3          ; +f1
    psrlq     m1, 3          ; -f1
    pand      m1, m0
    pandn     m0, m6
    psubusb   m4, m0
    paddusb   m4, m1         ; q0-f1
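; The byte-wise >>3 above is emulated: masking with pb_F8 clears the low
; three bits of every byte, so the qword-wide psrlq cannot leak bits across
; byte boundaries (the same trick is used with pb_FE for the /2 in
; simple_limit above). With no packed signed byte shift available, negative
; values are handled by also forming -f (psubb from zero) and applying the
; two halves with psubusb/paddusb under the sign mask from pcmpgtb.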
    ; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)
    mova      m7, [pw_63]
%ifdef m8
    SWAP       1, 8
%else
    mova      m1, lim_res
%endif
    pxor      m0, m0
    mova      m6, m1
    pcmpgtb   m0, m1         ; which are negative
    punpcklbw m6, m0         ; signed byte->word
    punpckhbw m1, m0
    mova      lim_sign, m0
    mova      mask_res, m6   ; backup for later in filter
    mova      lim_res, m1
    pmullw    m6, [pw_27]
    pmullw    m1, [pw_27]
    paddw     m6, m7
    paddw     m1, m7
    psraw     m6, 7
    psraw     m1, 7
    packsswb  m6, m1         ; a0
    pxor      m1, m1
    psubb     m1, m6
    pand      m1, m0         ; -a0
    pandn     m0, m6         ; +a0
    psubusb   m3, m1
    paddusb   m4, m1
    paddusb   m3, m0         ; p0+a0
    psubusb   m4, m0         ; q0-a0
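; a0 has now been applied to p0/q0. The a1 (weight 18) and a2 (weight 9)
; passes below repeat the same pattern for p1/q1 and p2/q2: multiply the
; sign-extended words, add the 63 rounding bias, arithmetic-shift right by
; 7, re-pack with signed saturation, then split into +a/-a halves.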
    mova      m6, mask_res
    mova      m1, lim_res
    mova      m0, lim_sign
    pmullw    m6, [pw_18]
    pmullw    m1, [pw_18]
    paddw     m6, m7
    paddw     m1, m7
    psraw     m6, 7
    psraw     m1, 7
    packsswb  m6, m1         ; a1
    pxor      m1, m1
    psubb     m1, m6
    pand      m1, m0         ; -a1
    pandn     m0, m6         ; +a1
    psubusb   m2, m1
    paddusb   m5, m1
    paddusb   m2, m0         ; p1+a1
    psubusb   m5, m0         ; q1-a1

%ifdef m8
    SWAP       6, 12
    SWAP       1, 8
%else
    mova      m6, mask_res
    mova      m1, lim_res
%endif
    pmullw    m6, [pw_9]
    pmullw    m1, [pw_9]
    paddw     m6, m7
    paddw     m1, m7
%ifdef m9
    SWAP       7, 9
%else
    mova      m7, lim_sign
%endif
    psraw     m6, 7
    psraw     m1, 7
    packsswb  m6, m1         ; a2
    pxor      m0, m0
    psubb     m0, m6
    pand      m0, m7         ; -a2
    pandn     m7, m6         ; +a2
%ifdef m8
    SWAP       1, 13
    SWAP       6, 14
%else
    mova      m1, p2backup
    mova      m6, q2backup
%endif
    psubusb   m1, m0
    paddusb   m6, m0
    paddusb   m1, m7         ; p2+a2
    psubusb   m6, m7         ; q2-a2
    ; store
%ifidn %2, v
    movrow    [dst2_reg+mstride_reg*4], m1
    movrow    [dst_reg +mstride_reg*2], m2
    movrow    [dst_reg +mstride_reg  ], m3
    movrow    [dst_reg], m4
    movrow    [dst2_reg], m5
    movrow    [dst2_reg+ stride_reg  ], m6
%if mmsize == 16 && %4 == 8
    add       dst8_reg, mstride_reg
    movhps    [dst8_reg+mstride_reg*2], m1
    movhps    [dst8_reg+mstride_reg  ], m2
    movhps    [dst8_reg], m3
    add       dst8_reg, stride_reg
    movhps    [dst8_reg], m4
    movhps    [dst8_reg+ stride_reg  ], m5
    movhps    [dst8_reg+ stride_reg*2], m6
%endif
%else ; h
    inc       dst_reg
    inc       dst2_reg
    ; 4x8/16 transpose
    TRANSPOSE4x4B 1, 2, 3, 4, 0
    SBUTTERFLY bw, 5, 6, 0
%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D 1, 2, 3, 4, dst_reg, dst2_reg, mstride_reg, stride_reg
    add       dst_reg, 4
    WRITE_2x4W m5, m6, dst2_reg, dst_reg, mstride_reg, stride_reg
%else ; sse2 (h)
    lea       dst8_reg, [dst8_reg+mstride_reg+1]
    WRITE_4x4D 1, 2, 3, 4, dst_reg, dst2_reg, dst8_reg, mstride_reg, stride_reg, %4
    lea       dst_reg, [dst2_reg+mstride_reg+4]
    lea       dst8_reg, [dst8_reg+mstride_reg+4]
%ifidn %1, sse4
    add       dst2_reg, 4
%endif
    WRITE_8W  m5, dst2_reg, dst_reg, mstride_reg, stride_reg
%ifidn %1, sse4
    lea       dst2_reg, [dst8_reg+ stride_reg]
%endif
    WRITE_8W  m6, dst2_reg, dst8_reg, mstride_reg, stride_reg
%endif
%endif

%if mmsize == 8
%if %4 == 8 ; chroma
%ifidn %2, h
    sub       dst_reg, 5
%endif
    cmp       dst_reg, dst8_reg
    mov       dst_reg, dst8_reg
    jnz       .next8px
%else
%ifidn %2, h
    lea       dst_reg, [dst_reg + stride_reg*8-5]
%else ; v
    add       dst_reg, 8
%endif
    dec       cnt_reg
    jg        .next8px
%endif
%endif

%ifndef m8 ; sse2 on x86-32 or mmx/mmxext
    mov       rsp, stack_reg   ; restore stack pointer
%endif
    RET
%endmacro

INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
MBEDGE_LOOPFILTER mmx,    v, 6, 16, 0
MBEDGE_LOOPFILTER mmx,    h, 6, 16, 0
MBEDGE_LOOPFILTER mmx,    v, 6,  8, 0
MBEDGE_LOOPFILTER mmx,    h, 6,  8, 0
%define SPLATB_REG SPLATB_REG_MMXEXT
MBEDGE_LOOPFILTER mmxext, v, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, h, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, v, 6,  8, 0
MBEDGE_LOOPFILTER mmxext, h, 6,  8, 0
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
%define WRITE_8W   WRITE_8W_SSE2
MBEDGE_LOOPFILTER sse2,   v, 5, 16, 15
%ifdef m8
MBEDGE_LOOPFILTER sse2,   h, 5, 16, 15
%else
MBEDGE_LOOPFILTER sse2,   h, 6, 16, 15
%endif
MBEDGE_LOOPFILTER sse2,   v, 6,  8, 15
MBEDGE_LOOPFILTER sse2,   h, 6,  8, 15
%define SPLATB_REG SPLATB_REG_SSSE3
MBEDGE_LOOPFILTER ssse3,  v, 5, 16, 15
%ifdef m8
MBEDGE_LOOPFILTER ssse3,  h, 5, 16, 15
%else
MBEDGE_LOOPFILTER ssse3,  h, 6, 16, 15
%endif
MBEDGE_LOOPFILTER ssse3,  v, 6,  8, 15
MBEDGE_LOOPFILTER ssse3,  h, 6,  8, 15
%define WRITE_8W   WRITE_8W_SSE4
%ifdef m8
MBEDGE_LOOPFILTER sse4,   h, 5, 16, 15
%else
MBEDGE_LOOPFILTER sse4,   h, 6, 16, 15
%endif
MBEDGE_LOOPFILTER sse4,   h, 6,  8, 15