;******************************************************************************
;* VP8 MMXEXT optimizations
;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
%include "x86util.asm"

SECTION_RODATA

fourtap_filter_hw_m: times 4 dw -6, 123
                     times 4 dw 12, -1
                     times 4 dw -9, 93
                     times 4 dw 50, -6
                     times 4 dw -6, 50
                     times 4 dw 93, -9
                     times 4 dw -1, 12
                     times 4 dw 123, -6

sixtap_filter_hw_m: times 4 dw 2, -11
                    times 4 dw 108, 36
                    times 4 dw -8, 1
                    times 4 dw 3, -16
                    times 4 dw 77, 77
                    times 4 dw -16, 3
                    times 4 dw 1, -8
                    times 4 dw 36, 108
                    times 4 dw -11, 2

fourtap_filter_hb_m: times 8 db -6, 123
                     times 8 db 12, -1
                     times 8 db -9, 93
                     times 8 db 50, -6
                     times 8 db -6, 50
                     times 8 db 93, -9
                     times 8 db -1, 12
                     times 8 db 123, -6

sixtap_filter_hb_m: times 8 db 2, 1
                    times 8 db -11, 108
                    times 8 db 36, -8
                    times 8 db 3, 3
                    times 8 db -16, 77
                    times 8 db 77, -16
                    times 8 db 1, 2
                    times 8 db -8, 36
                    times 8 db 108, -11

fourtap_filter_v_m: times 8 dw -6
                    times 8 dw 123
                    times 8 dw 12
                    times 8 dw -1
                    times 8 dw -9
                    times 8 dw 93
                    times 8 dw 50
                    times 8 dw -6
                    times 8 dw -6
                    times 8 dw 50
                    times 8 dw 93
                    times 8 dw -9
                    times 8 dw -1
                    times 8 dw 12
                    times 8 dw 123
                    times 8 dw -6

sixtap_filter_v_m: times 8 dw 2
                   times 8 dw -11
                   times 8 dw 108
                   times 8 dw 36
                   times 8 dw -8
                   times 8 dw 1
                   times 8 dw 3
                   times 8 dw -16
                   times 8 dw 77
                   times 8 dw 77
                   times 8 dw -16
                   times 8 dw 3
                   times 8 dw 1
                   times 8 dw -8
                   times 8 dw 36
                   times 8 dw 108
                   times 8 dw -11
                   times 8 dw 2

bilinear_filter_vw_m: times 8 dw 1
                      times 8 dw 2
                      times 8 dw 3
                      times 8 dw 4
                      times 8 dw 5
                      times 8 dw 6
                      times 8 dw 7

bilinear_filter_vb_m: times 8 db 7, 1
                      times 8 db 6, 2
                      times 8 db 5, 3
                      times 8 db 4, 4
                      times 8 db 3, 5
                      times 8 db 2, 6
                      times 8 db 1, 7
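
; Editor's note (an assumption about the layout, inferred from the indexing
; used further down): each table above stores one coefficient row per subpel
; position. The *_hw tables hold word pairs for pmaddwd, the *_hb tables
; byte pairs for pmaddubsw, and the *_v tables one word per tap for pmullw;
; the scaled offsets used later (e.g. r5*8-48 for six-tap, r5-16 for
; four-tap) select the row for the current mx/my.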
%ifdef PIC
%define fourtap_filter_hw r11
%define sixtap_filter_hw r11
%define fourtap_filter_hb r11
%define sixtap_filter_hb r11
%define fourtap_filter_v r11
%define sixtap_filter_v r11
%define bilinear_filter_vw r11
%define bilinear_filter_vb r11
%else
%define fourtap_filter_hw fourtap_filter_hw_m
%define sixtap_filter_hw sixtap_filter_hw_m
%define fourtap_filter_hb fourtap_filter_hb_m
%define sixtap_filter_hb sixtap_filter_hb_m
%define fourtap_filter_v fourtap_filter_v_m
%define sixtap_filter_v sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
%endif

filter_h2_shuf:  db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10

filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11

pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734

cextern pb_1
cextern pw_3
cextern pb_3
cextern pw_4
cextern pb_4
cextern pw_9
cextern pw_18
cextern pw_27
cextern pw_63
cextern pw_64
cextern pb_80
cextern pb_F8
cextern pb_FE

SECTION .text

;-----------------------------------------------------------------------------
; subpel MC functions:
;
; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
;                                              uint8_t *src, int srcstride,
;                                              int height, int mx, int my);
;-----------------------------------------------------------------------------
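
; Editor's sketch (illustrative only, not from the original source): one
; six-tap output pixel as computed by the code below, with F[] the
; coefficient row selected by the subpel position:
;
;   dst[x] = av_clip_uint8((F[0]*src[x-2] + F[1]*src[x-1] + F[2]*src[x+0] +
;                           F[3]*src[x+1] + F[4]*src[x+2] + F[5]*src[x+3] +
;                           64) >> 7);
;
; The "+64, >>7" rounding corresponds to the paddsw [pw_64] / psraw 7 pairs
; used throughout; the four-tap variants drop the two outermost taps.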
%macro FILTER_SSSE3 3
cglobal put_vp8_epel%1_h6_ssse3, 6, 6, %2
    lea r5d, [r5*3]
    mova m3, [filter_h6_shuf2]
    mova m4, [filter_h6_shuf3]
%ifdef PIC
    lea r11, [sixtap_filter_hb_m]
%endif
    mova m5, [sixtap_filter_hb+r5*8-48] ; set up 6tap filter in bytes
    mova m6, [sixtap_filter_hb+r5*8-32]
    mova m7, [sixtap_filter_hb+r5*8-16]

.nextrow
    movu m0, [r2-2]
    mova m1, m0
    mova m2, m0
%ifidn %1, 4
    ; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
    ; shuffle with a memory operand
    punpcklbw m0, [r2+3]
%else
    pshufb m0, [filter_h6_shuf1]
%endif
    pshufb m1, m3
    pshufb m2, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    pmaddubsw m2, m7
    paddsw m0, m1
    paddsw m0, m2
    paddsw m0, [pw_64]
    psraw m0, 7
    packuswb m0, m0
    movh [r0], m0 ; store

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_h4_ssse3, 6, 6, %3
    shl r5d, 4
    mova m2, [pw_64]
    mova m3, [filter_h2_shuf]
    mova m4, [filter_h4_shuf]
%ifdef PIC
    lea r11, [fourtap_filter_hb_m]
%endif
    mova m5, [fourtap_filter_hb+r5-16] ; set up 4tap filter in bytes
    mova m6, [fourtap_filter_hb+r5]

.nextrow
    movu m0, [r2-1]
    mova m1, m0
    pshufb m0, m3
    pshufb m1, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    paddsw m0, m2
    paddsw m0, m1
    psraw m0, 7
    packuswb m0, m0
    movh [r0], m0 ; store

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v4_ssse3, 7, 7, %2
    shl r6d, 4
%ifdef PIC
    lea r11, [fourtap_filter_hb_m]
%endif
    mova m5, [fourtap_filter_hb+r6-16]
    mova m6, [fourtap_filter_hb+r6]
    mova m7, [pw_64]

    ; read 3 lines
    sub r2, r3
    movh m0, [r2]
    movh m1, [r2+ r3]
    movh m2, [r2+2*r3]
    add r2, r3

.nextrow
    movh m3, [r2+2*r3] ; read new row
    mova m4, m0
    mova m0, m1
    punpcklbw m4, m1
    mova m1, m2
    punpcklbw m2, m3
    pmaddubsw m4, m5
    pmaddubsw m2, m6
    paddsw m4, m2
    mova m2, m3
    paddsw m4, m7
    psraw m4, 7
    packuswb m4, m4
    movh [r0], m4

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v6_ssse3, 7, 7, %2
    lea r6d, [r6*3]
%ifdef PIC
    lea r11, [sixtap_filter_hb_m]
%endif
    lea r6, [sixtap_filter_hb+r6*8]

    ; read 5 lines
    sub r2, r3
    sub r2, r3
    movh m0, [r2]
    movh m1, [r2+r3]
    movh m2, [r2+r3*2]
    lea r2, [r2+r3*2]
    add r2, r3
    movh m3, [r2]
    movh m4, [r2+r3]

.nextrow
    movh m5, [r2+2*r3] ; read new row
    mova m6, m0
    punpcklbw m6, m5
    mova m0, m1
    punpcklbw m1, m2
    mova m7, m3
    punpcklbw m7, m4
    pmaddubsw m6, [r6-48]
    pmaddubsw m1, [r6-32]
    pmaddubsw m7, [r6-16]
    paddsw m6, m1
    paddsw m6, m7
    mova m1, m2
    paddsw m6, [pw_64]
    mova m2, m3
    psraw m6, 7
    mova m3, m4
    packuswb m6, m6
    mova m4, m5
    movh [r0], m6

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET
%endmacro
INIT_MMX
FILTER_SSSE3 4, 0, 0
INIT_XMM
FILTER_SSSE3 8, 8, 7

; 4x4 block, H-only 4-tap filter
cglobal put_vp8_epel4_h4_mmxext, 6, 6
    shl r5d, 4
%ifdef PIC
    lea r11, [fourtap_filter_hw_m]
%endif
    movq mm4, [fourtap_filter_hw+r5-16] ; set up 4tap filter in words
    movq mm5, [fourtap_filter_hw+r5]
    movq mm7, [pw_64]
    pxor mm6, mm6

.nextrow
    movq mm1, [r2-1] ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq mm2, mm1 ; byte ABCD..
    punpcklbw mm1, mm6 ; byte->word ABCD
    pshufw mm0, mm2, 9 ; byte CDEF..
    punpcklbw mm0, mm6 ; byte->word CDEF
    pshufw mm3, mm1, 0x94 ; word ABBC
    pshufw mm1, mm0, 0x94 ; word CDDE
    pmaddwd mm3, mm4 ; multiply 2px with F0/F1
    movq mm0, mm1 ; backup for second set of pixels
    pmaddwd mm1, mm5 ; multiply 2px with F2/F3
    paddd mm3, mm1 ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    punpckhbw mm2, mm6 ; byte->word EFGH
    pmaddwd mm0, mm4 ; multiply backed up 2px with F0/F1
    pshufw mm1, mm2, 0x94 ; word EFFG
    pmaddwd mm1, mm5 ; multiply 2px with F2/F3
    paddd mm0, mm1 ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw mm3, mm0 ; merge dword->word (4px)
    paddsw mm3, mm7 ; rounding
    psraw mm3, 7
    packuswb mm3, mm6 ; clip and word->bytes
    movd [r0], mm3 ; store

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET
; 4x4 block, H-only 6-tap filter
cglobal put_vp8_epel4_h6_mmxext, 6, 6
    lea r5d, [r5*3]
%ifdef PIC
    lea r11, [sixtap_filter_hw_m]
%endif
    movq mm4, [sixtap_filter_hw+r5*8-48] ; set up 6tap filter in words
    movq mm5, [sixtap_filter_hw+r5*8-32]
    movq mm6, [sixtap_filter_hw+r5*8-16]
    movq mm7, [pw_64]
    pxor mm3, mm3

.nextrow
    movq mm1, [r2-2] ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq mm2, mm1 ; byte ABCD..
    punpcklbw mm1, mm3 ; byte->word ABCD
    pshufw mm0, mm2, 0x9 ; byte CDEF..
    punpckhbw mm2, mm3 ; byte->word EFGH
    punpcklbw mm0, mm3 ; byte->word CDEF
    pshufw mm1, mm1, 0x94 ; word ABBC
    pshufw mm2, mm2, 0x94 ; word EFFG
    pmaddwd mm1, mm4 ; multiply 2px with F0/F1
    pshufw mm3, mm0, 0x94 ; word CDDE
    movq mm0, mm3 ; backup for second set of pixels
    pmaddwd mm3, mm5 ; multiply 2px with F2/F3
    paddd mm1, mm3 ; add to 1st 2px cache
    movq mm3, mm2 ; backup for second set of pixels
    pmaddwd mm2, mm6 ; multiply 2px with F4/F5
    paddd mm1, mm2 ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    movd mm2, [r2+3] ; byte FGHI (prevent overreads)
    pmaddwd mm0, mm4 ; multiply 1st backed up 2px with F0/F1
    pmaddwd mm3, mm5 ; multiply 2nd backed up 2px with F2/F3
    paddd mm0, mm3 ; add to 2nd 2px cache
    pxor mm3, mm3
    punpcklbw mm2, mm3 ; byte->word FGHI
    pshufw mm2, mm2, 0xE9 ; word GHHI
    pmaddwd mm2, mm6 ; multiply 2px with F4/F5
    paddd mm0, mm2 ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw mm1, mm0 ; merge dword->word (4px)
    paddsw mm1, mm7 ; rounding
    psraw mm1, 7
    packuswb mm1, mm3 ; clip and word->bytes
    movd [r0], mm1 ; store

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET
; 8x4 block, H-only 4-tap filter
INIT_XMM
cglobal put_vp8_epel8_h4_sse2, 6, 6, 8
    shl r5d, 4
%ifdef PIC
    lea r11, [fourtap_filter_hw_m]
%endif
    mova m5, [fourtap_filter_hw+r5-16] ; set up 4tap filter in words
    mova m6, [fourtap_filter_hw+r5]
    pxor m7, m7

.nextrow
    movh m0, [r2-1]
    punpcklbw m0, m7 ; ABCDEFGH
    mova m1, m0
    mova m2, m0
    mova m3, m0
    psrldq m1, 2 ; BCDEFGH
    psrldq m2, 4 ; CDEFGH
    psrldq m3, 6 ; DEFGH
    punpcklwd m0, m1 ; ABBCCDDE
    punpcklwd m2, m3 ; CDDEEFFG
    pmaddwd m0, m5
    pmaddwd m2, m6
    paddd m0, m2
    movh m1, [r2+3]
    punpcklbw m1, m7 ; ABCDEFGH
    mova m2, m1
    mova m3, m1
    mova m4, m1
    psrldq m2, 2 ; BCDEFGH
    psrldq m3, 4 ; CDEFGH
    psrldq m4, 6 ; DEFGH
    punpcklwd m1, m2 ; ABBCCDDE
    punpcklwd m3, m4 ; CDDEEFFG
    pmaddwd m1, m5
    pmaddwd m3, m6
    paddd m1, m3
    packssdw m0, m1
    paddsw m0, [pw_64]
    psraw m0, 7
    packuswb m0, m7
    movh [r0], m0 ; store

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET
cglobal put_vp8_epel8_h6_sse2, 6, 6, 8
    lea r5d, [r5*3]
%ifdef PIC
    lea r11, [sixtap_filter_hw_m]
%endif
    lea r5, [sixtap_filter_hw+r5*8]
    pxor m7, m7

.nextrow
    movu m0, [r2-2]
    mova m6, m0
    mova m4, m0
    punpcklbw m0, m7 ; ABCDEFGHI
    mova m1, m0
    mova m2, m0
    mova m3, m0
    psrldq m1, 2 ; BCDEFGH
    psrldq m2, 4 ; CDEFGH
    psrldq m3, 6 ; DEFGH
    psrldq m4, 4
    punpcklbw m4, m7 ; EFGH
    mova m5, m4
    psrldq m5, 2 ; FGH
    punpcklwd m0, m1 ; ABBCCDDE
    punpcklwd m2, m3 ; CDDEEFFG
    punpcklwd m4, m5 ; EFFGGHHI
    pmaddwd m0, [r5-48]
    pmaddwd m2, [r5-32]
    pmaddwd m4, [r5-16]
    paddd m0, m2
    paddd m0, m4
    psrldq m6, 4
    mova m4, m6
    punpcklbw m6, m7 ; ABCDEFGHI
    mova m1, m6
    mova m2, m6
    mova m3, m6
    psrldq m1, 2 ; BCDEFGH
    psrldq m2, 4 ; CDEFGH
    psrldq m3, 6 ; DEFGH
    psrldq m4, 4
    punpcklbw m4, m7 ; EFGH
    mova m5, m4
    psrldq m5, 2 ; FGH
    punpcklwd m6, m1 ; ABBCCDDE
    punpcklwd m2, m3 ; CDDEEFFG
    punpcklwd m4, m5 ; EFFGGHHI
    pmaddwd m6, [r5-48]
    pmaddwd m2, [r5-32]
    pmaddwd m4, [r5-16]
    paddd m6, m2
    paddd m6, m4
    packssdw m0, m6
    paddsw m0, [pw_64]
    psraw m0, 7
    packuswb m0, m7
    movh [r0], m0 ; store

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET
%macro FILTER_V 3
; 4x4 block, V-only 4-tap filter
cglobal put_vp8_epel%2_v4_%1, 7, 7, %3
    shl r6d, 5
%ifdef PIC
    lea r11, [fourtap_filter_v_m]
%endif
    lea r6, [fourtap_filter_v+r6-32]
    mova m6, [pw_64]
    pxor m7, m7
    mova m5, [r6+48]

    ; read 3 lines
    sub r2, r3
    movh m0, [r2]
    movh m1, [r2+ r3]
    movh m2, [r2+2*r3]
    add r2, r3
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7

.nextrow
    ; first calculate negative taps (to prevent losing positive overflows)
    movh m4, [r2+2*r3] ; read new row
    punpcklbw m4, m7
    mova m3, m4
    pmullw m0, [r6+0]
    pmullw m4, m5
    paddsw m4, m0

    ; then calculate positive taps
    mova m0, m1
    pmullw m1, [r6+16]
    paddsw m4, m1
    mova m1, m2
    pmullw m2, [r6+32]
    paddsw m4, m2
    mova m2, m3

    ; round/clip/store
    paddsw m4, m6
    psraw m4, 7
    packuswb m4, m7
    movh [r0], m4

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET

; 4x4 block, V-only 6-tap filter
cglobal put_vp8_epel%2_v6_%1, 7, 7, %3
    shl r6d, 4
    lea r6, [r6*3]
%ifdef PIC
    lea r11, [sixtap_filter_v_m]
%endif
    lea r6, [sixtap_filter_v+r6-96]
    pxor m7, m7

    ; read 5 lines
    sub r2, r3
    sub r2, r3
    movh m0, [r2]
    movh m1, [r2+r3]
    movh m2, [r2+r3*2]
    lea r2, [r2+r3*2]
    add r2, r3
    movh m3, [r2]
    movh m4, [r2+r3]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    punpcklbw m4, m7

.nextrow
    ; first calculate negative taps (to prevent losing positive overflows)
    mova m5, m1
    pmullw m5, [r6+16]
    mova m6, m4
    pmullw m6, [r6+64]
    paddsw m6, m5

    ; then calculate positive taps
    movh m5, [r2+2*r3] ; read new row
    punpcklbw m5, m7
    pmullw m0, [r6+0]
    paddsw m6, m0
    mova m0, m1
    mova m1, m2
    pmullw m2, [r6+32]
    paddsw m6, m2
    mova m2, m3
    pmullw m3, [r6+48]
    paddsw m6, m3
    mova m3, m4
    mova m4, m5
    pmullw m5, [r6+80]
    paddsw m6, m5

    ; round/clip/store
    paddsw m6, [pw_64]
    psraw m6, 7
    packuswb m6, m7
    movh [r0], m6

    ; go to next line
    add r0, r1
    add r2, r3
    dec r4 ; next row
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_V mmxext, 4, 0
INIT_XMM
FILTER_V sse2, 8, 8
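
; Editor's sketch of the bilinear kernels instantiated below (names are
; illustrative): with frac the 3-bit subpel fraction and a/b the two source
; pixels, each output is
;
;   dst[x] = ((8-frac)*a + frac*b + 4) >> 3
;
; The code realizes the ">> 3 with rounding" as psraw 2 followed by pavgw
; against a zero register, since pavgw with 0 computes (x+1)>>1.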
%macro FILTER_BILINEAR 3
cglobal put_vp8_bilinear%2_v_%1, 7, 7, %3
    mov r5d, 8*16
    shl r6d, 4
    sub r5d, r6d
%ifdef PIC
    lea r11, [bilinear_filter_vw_m]
%endif
    pxor m6, m6
    mova m4, [bilinear_filter_vw+r5-16]
    mova m5, [bilinear_filter_vw+r6-16]

.nextrow
    movh m0, [r2+r3*0]
    movh m1, [r2+r3*1]
    movh m3, [r2+r3*2]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m3, m6
    mova m2, m1
    pmullw m0, m4
    pmullw m1, m5
    pmullw m2, m4
    pmullw m3, m5
    paddsw m0, m1
    paddsw m2, m3
    psraw m0, 2
    psraw m2, 2
    pavgw m0, m6
    pavgw m2, m6
%ifidn %1, mmxext
    packuswb m0, m0
    packuswb m2, m2
    movh [r0+r1*0], m0
    movh [r0+r1*1], m2
%else
    packuswb m0, m2
    movh [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif

    lea r0, [r0+r1*2]
    lea r2, [r2+r3*2]
    sub r4, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%2_h_%1, 7, 7, %3
    mov r6d, 8*16
    shl r5d, 4
    sub r6d, r5d
%ifdef PIC
    lea r11, [bilinear_filter_vw_m]
%endif
    pxor m6, m6
    mova m4, [bilinear_filter_vw+r6-16]
    mova m5, [bilinear_filter_vw+r5-16]

.nextrow
    movh m0, [r2+r3*0+0]
    movh m1, [r2+r3*0+1]
    movh m2, [r2+r3*1+0]
    movh m3, [r2+r3*1+1]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m2, m6
    punpcklbw m3, m6
    pmullw m0, m4
    pmullw m1, m5
    pmullw m2, m4
    pmullw m3, m5
    paddsw m0, m1
    paddsw m2, m3
    psraw m0, 2
    psraw m2, 2
    pavgw m0, m6
    pavgw m2, m6
%ifidn %1, mmxext
    packuswb m0, m0
    packuswb m2, m2
    movh [r0+r1*0], m0
    movh [r0+r1*1], m2
%else
    packuswb m0, m2
    movh [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif

    lea r0, [r0+r1*2]
    lea r2, [r2+r3*2]
    sub r4, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_BILINEAR mmxext, 4, 0
INIT_XMM
FILTER_BILINEAR sse2, 8, 7
%macro FILTER_BILINEAR_SSSE3 1
cglobal put_vp8_bilinear%1_v_ssse3, 7, 7
    shl r6d, 4
%ifdef PIC
    lea r11, [bilinear_filter_vb_m]
%endif
    pxor m4, m4
    mova m3, [bilinear_filter_vb+r6-16]

.nextrow
    movh m0, [r2+r3*0]
    movh m1, [r2+r3*1]
    movh m2, [r2+r3*2]
    punpcklbw m0, m1
    punpcklbw m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw m0, 2
    psraw m1, 2
    pavgw m0, m4
    pavgw m1, m4
%if mmsize == 8
    packuswb m0, m0
    packuswb m1, m1
    movh [r0+r1*0], m0
    movh [r0+r1*1], m1
%else
    packuswb m0, m1
    movh [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif

    lea r0, [r0+r1*2]
    lea r2, [r2+r3*2]
    sub r4, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%1_h_ssse3, 7, 7
    shl r5d, 4
%ifdef PIC
    lea r11, [bilinear_filter_vb_m]
%endif
    pxor m4, m4
    mova m2, [filter_h2_shuf]
    mova m3, [bilinear_filter_vb+r5-16]

.nextrow
    movu m0, [r2+r3*0]
    movu m1, [r2+r3*1]
    pshufb m0, m2
    pshufb m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw m0, 2
    psraw m1, 2
    pavgw m0, m4
    pavgw m1, m4
%if mmsize == 8
    packuswb m0, m0
    packuswb m1, m1
    movh [r0+r1*0], m0
    movh [r0+r1*1], m1
%else
    packuswb m0, m1
    movh [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif

    lea r0, [r0+r1*2]
    lea r2, [r2+r3*2]
    sub r4, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_BILINEAR_SSSE3 4
INIT_XMM
FILTER_BILINEAR_SSSE3 8
cglobal put_vp8_pixels8_mmx, 5, 5
.nextrow:
    movq mm0, [r2+r3*0]
    movq mm1, [r2+r3*1]
    lea r2, [r2+r3*2]
    movq [r0+r1*0], mm0
    movq [r0+r1*1], mm1
    lea r0, [r0+r1*2]
    sub r4d, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_pixels16_mmx, 5, 5
.nextrow:
    movq mm0, [r2+r3*0+0]
    movq mm1, [r2+r3*0+8]
    movq mm2, [r2+r3*1+0]
    movq mm3, [r2+r3*1+8]
    lea r2, [r2+r3*2]
    movq [r0+r1*0+0], mm0
    movq [r0+r1*0+8], mm1
    movq [r0+r1*1+0], mm2
    movq [r0+r1*1+8], mm3
    lea r0, [r0+r1*2]
    sub r4d, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_pixels16_sse, 5, 5, 2
.nextrow:
    movups xmm0, [r2+r3*0]
    movups xmm1, [r2+r3*1]
    lea r2, [r2+r3*2]
    movaps [r0+r1*0], xmm0
    movaps [r0+r1*1], xmm1
    lea r0, [r0+r1*2]
    sub r4d, 2
    jg .nextrow
    REP_RET
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------
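
; Editor's reference sketch of the DC-only add below (hedged, C-style):
;
;   dc = (block[0] + 4) >> 3;   // paddw [pw_4], psraw 3
;   block[0] = 0;
;   for (y = 0; y < 4; y++)
;       for (x = 0; x < 4; x++)
;           dst[y*stride+x] = av_clip_uint8(dst[y*stride+x] + dc);
;
; The MMX version splits the DC into a +dc and a -dc byte vector and applies
; them via paddusb/psubusb, so negative DC values clip correctly in unsigned
; byte arithmetic.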
%macro ADD_DC 4
    %4 m2, [r0+%3]
    %4 m3, [r0+r2+%3]
    %4 m4, [r1+%3]
    %4 m5, [r1+r2+%3]
    paddusb m2, %1
    paddusb m3, %1
    paddusb m4, %1
    paddusb m5, %1
    psubusb m2, %2
    psubusb m3, %2
    psubusb m4, %2
    psubusb m5, %2
    %4 [r0+%3], m2
    %4 [r0+r2+%3], m3
    %4 [r1+%3], m4
    %4 [r1+r2+%3], m5
%endmacro

INIT_MMX
cglobal vp8_idct_dc_add_mmx, 3, 3
    ; load data
    movd m0, [r1]

    ; calculate DC
    paddw m0, [pw_4]
    pxor m1, m1
    psraw m0, 3
    movd [r1], m1
    psubw m1, m0
    packuswb m0, m0
    packuswb m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1
    punpcklwd m0, m0
    punpcklwd m1, m1

    ; add DC
    lea r1, [r0+r2*2]
    ADD_DC m0, m1, 0, movh
    RET

INIT_XMM
cglobal vp8_idct_dc_add_sse4, 3, 3, 6
    ; load data
    movd m0, [r1]
    pxor m1, m1

    ; calculate DC
    paddw m0, [pw_4]
    movd [r1], m1
    lea r1, [r0+r2*2]
    movd m2, [r0]
    movd m3, [r0+r2]
    movd m4, [r1]
    movd m5, [r1+r2]
    psraw m0, 3
    pshuflw m0, m0, 0
    punpcklqdq m0, m0
    punpckldq m2, m3
    punpckldq m4, m5
    punpcklbw m2, m1
    punpcklbw m4, m1
    paddw m2, m0
    paddw m4, m0
    packuswb m2, m4
    movd [r0], m2
    pextrd [r0+r2], m2, 1
    pextrd [r1], m2, 2
    pextrd [r1+r2], m2, 3
    RET
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------

INIT_MMX
cglobal vp8_idct_dc_add4_mmx, 3, 3
    ; load data
    movd m0, [r1+32*0] ; A
    movd m1, [r1+32*2] ; C
    punpcklwd m0, [r1+32*1] ; A B
    punpcklwd m1, [r1+32*3] ; C D
    punpckldq m0, m1 ; A B C D
    pxor m6, m6

    ; calculate DC
    paddw m0, [pw_4]
    movd [r1+32*0], m6
    movd [r1+32*1], m6
    movd [r1+32*2], m6
    movd [r1+32*3], m6
    psraw m0, 3
    psubw m6, m0
    packuswb m0, m0
    packuswb m6, m6
    punpcklbw m0, m0 ; AABBCCDD
    punpcklbw m6, m6 ; AABBCCDD
    movq m1, m0
    movq m7, m6
    punpcklbw m0, m0 ; AAAABBBB
    punpckhbw m1, m1 ; CCCCDDDD
    punpcklbw m6, m6 ; AAAABBBB
    punpckhbw m7, m7 ; CCCCDDDD

    ; add DC
    lea r1, [r0+r2*2]
    ADD_DC m0, m6, 0, mova
    ADD_DC m1, m7, 8, mova
    RET

INIT_XMM
cglobal vp8_idct_dc_add4_sse2, 3, 3
    ; load data
    movd m0, [r1+32*0] ; A
    movd m1, [r1+32*2] ; C
    punpcklwd m0, [r1+32*1] ; A B
    punpcklwd m1, [r1+32*3] ; C D
    punpckldq m0, m1 ; A B C D
    pxor m1, m1

    ; calculate DC
    paddw m0, [pw_4]
    movd [r1+32*0], m1
    movd [r1+32*1], m1
    movd [r1+32*2], m1
    movd [r1+32*3], m1
    psraw m0, 3
    psubw m1, m0
    packuswb m0, m0
    packuswb m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1

    ; add DC
    lea r1, [r0+r2*2]
    ADD_DC m0, m1, 0, mova
    RET
;-----------------------------------------------------------------------------
; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------

; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
; this macro assumes that m6/m7 have words for 20091/17734 loaded
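;
; Editor's note: pmulhw computes (x*c) >> 16 on signed words, so
; mul_20091(x) is built as x + ((x*20091) >> 16), and mul_35468(x) as
; ((2*x)*17734) >> 16; the input is doubled first (the paddw below) because
; 35468 does not fit in a signed 16-bit multiplier, which is why the
; constant is stored halved as pw_17734.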
%macro VP8_MULTIPLY_SUMSUB 4
    mova %3, %1
    mova %4, %2
    pmulhw %3, m6 ; 20091(1)
    pmulhw %4, m6 ; 20091(2)
    paddw %3, %1
    paddw %4, %2
    paddw %1, %1
    paddw %2, %2
    pmulhw %1, m7 ; 35468(1)
    pmulhw %2, m7 ; 35468(2)
    psubw %1, %4
    paddw %2, %3
%endmacro

; calculate x0=%1+%3; x1=%1-%3
; x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
    SUMSUB_BA m%3, m%1, m%5 ; t0, t1
    VP8_MULTIPLY_SUMSUB m%2, m%4, m%5, m%6 ; t2, t3
    SUMSUB_BA m%4, m%3, m%5 ; tmp0, tmp3
    SUMSUB_BA m%2, m%1, m%5 ; tmp1, tmp2
    SWAP %4, %1
    SWAP %4, %3
%endmacro

INIT_MMX
%macro VP8_IDCT_ADD 1
cglobal vp8_idct_add_%1, 3, 3
    ; load block data
    movq m0, [r1+ 0]
    movq m1, [r1+ 8]
    movq m2, [r1+16]
    movq m3, [r1+24]
    movq m6, [pw_20091]
    movq m7, [pw_17734]
%ifidn %1, sse
    xorps xmm0, xmm0
    movaps [r1+ 0], xmm0
    movaps [r1+16], xmm0
%else
    pxor m4, m4
    movq [r1+ 0], m4
    movq [r1+ 8], m4
    movq [r1+16], m4
    movq [r1+24], m4
%endif

    ; actual IDCT
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    paddw m0, [pw_4]
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W 0, 1, 2, 3, 4

    ; store
    pxor m4, m4
    lea r1, [r0+2*r2]
    STORE_DIFFx2 m0, m1, m6, m7, m4, 3, r0, r2
    STORE_DIFFx2 m2, m3, m6, m7, m4, 3, r1, r2
    RET
%endmacro

VP8_IDCT_ADD mmx
VP8_IDCT_ADD sse
;-----------------------------------------------------------------------------
; void vp8_luma_dc_wht_mmx(DCTELEM block[4][4][16], DCTELEM dc[16])
;-----------------------------------------------------------------------------
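
; Editor's outline of the inverse WHT below: one 4x4 Hadamard pass over the
; rows, a transpose, +3 added to the DC term for rounding, a second Hadamard
; pass, then each result is written as (x >> 3) into the DC slot (element 0)
; of the matching 16-coefficient subblock; the 2*16*n offsets in SCATTER_WHT
; address exactly those slots.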
%macro SCATTER_WHT 3
    movd r1d, m%1
    movd r2d, m%2
    mov [r0+2*16*(0+%3)], r1w
    mov [r0+2*16*(1+%3)], r2w
    shr r1d, 16
    shr r2d, 16
    psrlq m%1, 32
    psrlq m%2, 32
    mov [r0+2*16*(4+%3)], r1w
    mov [r0+2*16*(5+%3)], r2w
    movd r1d, m%1
    movd r2d, m%2
    mov [r0+2*16*(8+%3)], r1w
    mov [r0+2*16*(9+%3)], r2w
    shr r1d, 16
    shr r2d, 16
    mov [r0+2*16*(12+%3)], r1w
    mov [r0+2*16*(13+%3)], r2w
%endmacro

%macro HADAMARD4_1D 4
    SUMSUB_BADC m%2, m%1, m%4, m%3
    SUMSUB_BADC m%4, m%2, m%3, m%1
    SWAP %1, %4, %3
%endmacro

INIT_MMX
cglobal vp8_luma_dc_wht_mmx, 2, 3
    movq m0, [r1]
    movq m1, [r1+8]
    movq m2, [r1+16]
    movq m3, [r1+24]
    HADAMARD4_1D 0, 1, 2, 3
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    paddw m0, [pw_3]
    HADAMARD4_1D 0, 1, 2, 3
    psraw m0, 3
    psraw m1, 3
    psraw m2, 3
    psraw m3, 3
    SCATTER_WHT 0, 1, 0
    SCATTER_WHT 2, 3, 2
    RET
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
;-----------------------------------------------------------------------------
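
; Editor's sketch of the simple filter applied below, per pixel column
; across the edge (p1, p0 | q0, q1); clip names are illustrative:
;
;   if (2*|p0-q0| + |p1-q1|/2 <= flim) {
;       a  = 3*(q0-p0) + (p1-q1);            // signed byte arithmetic
;       f1 = clip_int8(a+4) >> 3;  q0 = clip_uint8(q0 - f1);
;       f2 = clip_int8(a+3) >> 3;  p0 = clip_uint8(p0 + f2);
;   }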
; macro called with 7 mm register indexes as argument, and 4 regular registers
;
; first 4 mm registers will carry the transposed pixel data
; the other three are scratch space (one would be sufficient, but this allows
; for more spreading/pipelining and thus faster execution on out-of-order CPUs)
;
; first two regular registers are buf+4*stride and buf+5*stride
; third is -stride, fourth is +stride
%macro READ_8x4_INTERLEAVED 11
    ; interleave 8 (A-H) rows of 4 pixels each
    movd m%1, [%8+%10*4] ; A0-3
    movd m%5, [%9+%10*4] ; B0-3
    movd m%2, [%8+%10*2] ; C0-3
    movd m%6, [%8+%10] ; D0-3
    movd m%3, [%8] ; E0-3
    movd m%7, [%9] ; F0-3
    movd m%4, [%9+%11] ; G0-3
    punpcklbw m%1, m%5 ; A/B interleaved
    movd m%5, [%9+%11*2] ; H0-3
    punpcklbw m%2, m%6 ; C/D interleaved
    punpcklbw m%3, m%7 ; E/F interleaved
    punpcklbw m%4, m%5 ; G/H interleaved
%endmacro

; macro called with 7 mm register indexes as argument, and 5 regular registers
; first 11 mean the same as READ_8x4_INTERLEAVED above
; fifth regular register is scratch space to reach the bottom 8 rows; it
; will be set to second regular register + 8*stride at the end
%macro READ_16x4_INTERLEAVED 12
    ; transpose 16 (A-P) rows of 4 pixels each
    lea %12, [r0+8*r2]

    ; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
    movd m%1, [%8+%10*4] ; A0-3
    movd m%3, [%12+%10*4] ; I0-3
    movd m%2, [%8+%10*2] ; C0-3
    movd m%4, [%12+%10*2] ; K0-3
    movd m%6, [%8+%10] ; D0-3
    movd m%5, [%12+%10] ; L0-3
    movd m%7, [%12] ; M0-3
    add %12, %11
    punpcklbw m%1, m%3 ; A/I
    movd m%3, [%8] ; E0-3
    punpcklbw m%2, m%4 ; C/K
    punpcklbw m%6, m%5 ; D/L
    punpcklbw m%3, m%7 ; E/M
    punpcklbw m%2, m%6 ; C/D/K/L interleaved

    ; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
    movd m%5, [%9+%10*4] ; B0-3
    movd m%4, [%12+%10*4] ; J0-3
    movd m%7, [%9] ; F0-3
    movd m%6, [%12] ; N0-3
    punpcklbw m%5, m%4 ; B/J
    punpcklbw m%7, m%6 ; F/N
    punpcklbw m%1, m%5 ; A/B/I/J interleaved
    punpcklbw m%3, m%7 ; E/F/M/N interleaved
    movd m%4, [%9+%11] ; G0-3
    movd m%6, [%12+%11] ; O0-3
    movd m%5, [%9+%11*2] ; H0-3
    movd m%7, [%12+%11*2] ; P0-3
    punpcklbw m%4, m%6 ; G/O
    punpcklbw m%5, m%7 ; H/P
    punpcklbw m%4, m%5 ; G/H/O/P interleaved
%endmacro
; write 4 mm registers of 2 dwords each
; first four arguments are mm register indexes containing source data
; last four are registers containing buf+4*stride, buf+5*stride,
; -stride and +stride
%macro WRITE_4x2D 8
    ; write out (2 dwords per register)
    movd [%5+%7*4], m%1
    movd [%5+%7*2], m%2
    movd [%5], m%3
    movd [%6+%8], m%4
    punpckhdq m%1, m%1
    punpckhdq m%2, m%2
    punpckhdq m%3, m%3
    punpckhdq m%4, m%4
    movd [%6+%7*4], m%1
    movd [%5+%7], m%2
    movd [%6], m%3
    movd [%6+%8*2], m%4
%endmacro

; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_4x2D, but with an extra register, so that the 5 regular
; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
; we add 1*stride to the third regular register in the process
; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
; same memory region), or 8 if they cover two separate buffers (third one points to
; a different memory region than the first two), allowing for more optimal code for
; the 16-width case
%macro WRITE_4x4D 10
    ; write out (4 dwords per register), start with dwords zero
    movd [%5+%8*4], m%1
    movd [%5], m%2
    movd [%7+%8*4], m%3
    movd [%7], m%4

    ; store dwords 1
    psrldq m%1, 4
    psrldq m%2, 4
    psrldq m%3, 4
    psrldq m%4, 4
    movd [%6+%8*4], m%1
    movd [%6], m%2
%if %10 == 16
    movd [%6+%9*4], m%3
%endif
    movd [%7+%9], m%4

    ; write dwords 2
    psrldq m%1, 4
    psrldq m%2, 4
%if %10 == 8
    movd [%5+%8*2], m%1
    movd %5, m%3
%endif
    psrldq m%3, 4
    psrldq m%4, 4
%if %10 == 16
    movd [%5+%8*2], m%1
%endif
    movd [%6+%9], m%2
    movd [%7+%8*2], m%3
    movd [%7+%9*2], m%4
    add %7, %9

    ; store dwords 3
    psrldq m%1, 4
    psrldq m%2, 4
    psrldq m%3, 4
    psrldq m%4, 4
%if %10 == 8
    mov [%7+%8*4], %5d
    movd [%6+%8*2], m%1
%else
    movd [%5+%8], m%1
%endif
    movd [%6+%9*2], m%2
    movd [%7+%8*2], m%3
    movd [%7+%9*2], m%4
%endmacro
%macro SPLATB_REG 3-4
    movd %1, %2
%ifidn %3, ssse3
    pshufb %1, %4
%else
    punpcklbw %1, %1
%if mmsize == 16 ; sse2
    pshuflw %1, %1, 0x0
    punpcklqdq %1, %1
%elifidn %3, mmx
    punpcklwd %1, %1
    punpckldq %1, %1
%else ; mmxext
    pshufw %1, %1, 0x0
%endif
%endif
%endmacro
%macro SIMPLE_LOOPFILTER 3
cglobal vp8_%2_loop_filter_simple_%1, 3, %3
%ifidn %2, h
    mov r5, rsp ; backup stack pointer
    and rsp, ~(mmsize-1) ; align stack
%endif
%if mmsize == 8 ; mmx/mmxext
    mov r3, 2
%endif
%ifidn %1, ssse3
    pxor m0, m0
%endif
    SPLATB_REG m7, r2, %1, m0 ; splat "flim" into register

    ; set up indexes to address 4 rows
    mov r2, r1
    neg r1
%ifidn %2, h
    lea r0, [r0+4*r2-2]
    sub rsp, mmsize*2 ; (aligned) storage space for saving p1/q1
%endif

%if mmsize == 8 ; mmx/mmxext
.next8px
%endif
%ifidn %2, v
    ; read 4 half/full rows of pixels
    mova m0, [r0+r1*2] ; p1
    mova m1, [r0+r1] ; p0
    mova m2, [r0] ; q0
    mova m3, [r0+r2] ; q1
%else ; h
    lea r4, [r0+r2]
%if mmsize == 8 ; mmx/mmxext
    READ_8x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2
%else ; sse2
    READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2, r3
%endif
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    mova [rsp], m0 ; store p1
    mova [rsp+mmsize], m3 ; store q1
%endif

    ; simple_limit
    mova m5, m2 ; m5=backup of q0
    mova m6, m1 ; m6=backup of p0
    psubusb m1, m2 ; p0-q0
    psubusb m2, m6 ; q0-p0
    por m1, m2 ; FFABS(p0-q0)
    paddusb m1, m1 ; m1=FFABS(p0-q0)*2
    mova m4, m3
    mova m2, m0
    psubusb m3, m0 ; q1-p1
    psubusb m0, m4 ; p1-q1
    por m3, m0 ; FFABS(p1-q1)
    mova m0, [pb_80]
    pxor m2, m0
    pxor m4, m0
    psubsb m2, m4 ; m2=p1-q1 (signed) backup for below
    pand m3, [pb_FE]
    psrlq m3, 1 ; m3=FFABS(p1-q1)/2, this can be used signed
    paddusb m3, m1
    psubusb m3, m7
    pxor m1, m1
    pcmpeqb m3, m1 ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)

    ; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
    mova m4, m5
    pxor m5, m0
    pxor m0, m6
    psubsb m5, m0 ; q0-p0 (signed)
    paddsb m2, m5
    paddsb m2, m5
    paddsb m2, m5 ; a=(p1-q1) + 3*(q0-p0)
    pand m2, m3 ; apply filter mask (m3)
    mova m3, [pb_F8]
    mova m1, m2
    paddsb m2, [pb_4] ; f1<<3=a+4
    paddsb m1, [pb_3] ; f2<<3=a+3
    pand m2, m3
    pand m1, m3 ; cache f2<<3
    pxor m0, m0
    pxor m3, m3
    pcmpgtb m0, m2 ; which values are <0?
    psubb m3, m2 ; -f1<<3
    psrlq m2, 3 ; +f1
    psrlq m3, 3 ; -f1
    pand m3, m0
    pandn m0, m2
    psubusb m4, m0
    paddusb m4, m3 ; q0-f1
    pxor m0, m0
    pxor m3, m3
    pcmpgtb m0, m1 ; which values are <0?
    psubb m3, m1 ; -f2<<3
    psrlq m1, 3 ; +f2
    psrlq m3, 3 ; -f2
    pand m3, m0
    pandn m0, m1
    paddusb m6, m0
    psubusb m6, m3 ; p0+f2

    ; store
%ifidn %2, v
    mova [r0], m4
    mova [r0+r1], m6
%else ; h
    mova m0, [rsp] ; p1
    SWAP 2, 4 ; p0
    SWAP 1, 6 ; q0
    mova m3, [rsp+mmsize] ; q1
    TRANSPOSE4x4B 0, 1, 2, 3, 4
%if mmsize == 16 ; sse2
    add r3, r1 ; change from r4*8*stride to r0+8*stride
    WRITE_4x4D 0, 1, 2, 3, r0, r4, r3, r1, r2, 16
%else ; mmx/mmxext
    WRITE_4x2D 0, 1, 2, 3, r0, r4, r1, r2
%endif
%endif

%if mmsize == 8 ; mmx/mmxext
    ; next 8 pixels
%ifidn %2, v
    add r0, 8 ; advance 8 cols = pixels
%else ; h
    lea r0, [r0+r2*8] ; advance 8 rows = lines
%endif
    dec r3
    jg .next8px
%ifidn %2, v
    REP_RET
%else ; h
    mov rsp, r5 ; restore stack pointer
    RET
%endif
%else ; sse2
%ifidn %2, h
    mov rsp, r5 ; restore stack pointer
%endif
    RET
%endif
%endmacro

INIT_MMX
SIMPLE_LOOPFILTER mmx, v, 4
SIMPLE_LOOPFILTER mmx, h, 6
SIMPLE_LOOPFILTER mmxext, v, 4
SIMPLE_LOOPFILTER mmxext, h, 6
INIT_XMM
SIMPLE_LOOPFILTER sse2, v, 3
SIMPLE_LOOPFILTER sse2, h, 6
SIMPLE_LOOPFILTER ssse3, v, 3
SIMPLE_LOOPFILTER ssse3, h, 6
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                            int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
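
; Editor's outline of the inner filter below: normal_limit() requires each
; of |p3-p2|, |p2-p1|, |p1-p0|, |q1-q0|, |q2-q1|, |q3-q2| to be <= I and
; 2*|p0-q0| + |p1-q1|/2 <= E. hev() marks columns where |p1-p0| or |q1-q0|
; exceeds hev_thresh; those columns include the (p1-q1) term in the common
; filter ("is4tap"), while the remaining columns additionally adjust p1/q1
; by a = (f1+1) >> 1.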
%macro INNER_LOOPFILTER 5
%if %4 == 8 ; chroma
cglobal vp8_%2_loop_filter8uv_inner_%1, 6, %3, %5
%define dst8_reg r1
%define mstride_reg r2
%define E_reg r3
%define I_reg r4
%define hev_thr_reg r5
%else ; luma
cglobal vp8_%2_loop_filter16y_inner_%1, 5, %3, %5
%define mstride_reg r1
%define E_reg r2
%define I_reg r3
%define hev_thr_reg r4
%ifdef m8 ; x86-64, sse2
%define dst8_reg r4
%elif mmsize == 16 ; x86-32, sse2
%define dst8_reg r5
%else ; x86-32, mmx/mmxext
%define cnt_reg r5
%endif
%endif
%define dst_reg r0
%define stride_reg E_reg
%define dst2_reg I_reg
%ifndef m8
%define stack_reg hev_thr_reg
%endif

%ifidn %1, ssse3
    pxor m7, m7
%endif
%ifndef m8 ; mmx/mmxext or sse2 on x86-32
    ; splat function arguments
    SPLATB_REG m0, E_reg, %1, m7 ; E
    SPLATB_REG m1, I_reg, %1, m7 ; I
    SPLATB_REG m2, hev_thr_reg, %1, m7 ; hev_thresh

    ; align stack
    mov stack_reg, rsp ; backup stack pointer
    and rsp, ~(mmsize-1) ; align stack
%ifidn %2, v
    sub rsp, mmsize * 4 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
                        ;               [3]=hev() result
%else ; h
    sub rsp, mmsize * 5 ; extra storage space for transposes
%endif

%define flim_E [rsp]
%define flim_I [rsp+mmsize]
%define hev_thr [rsp+mmsize*2]
%define mask_res [rsp+mmsize*3]
%define p0backup [rsp+mmsize*3]
%define q0backup [rsp+mmsize*4]

    mova flim_E, m0
    mova flim_I, m1
    mova hev_thr, m2
%else ; sse2 on x86-64
%define flim_E m9
%define flim_I m10
%define hev_thr m11
%define mask_res m12
%define p0backup m12
%define q0backup m8

    ; splat function arguments
    SPLATB_REG flim_E, E_reg, %1, m7 ; E
    SPLATB_REG flim_I, I_reg, %1, m7 ; I
    SPLATB_REG hev_thr, hev_thr_reg, %1, m7 ; hev_thresh
%endif

%if mmsize == 8 && %4 == 16 ; mmx/mmxext
    mov cnt_reg, 2
%endif
    mov stride_reg, mstride_reg
    neg mstride_reg
%ifidn %2, h
    lea dst_reg, [dst_reg + stride_reg*4-4]
%if %4 == 8
    lea dst8_reg, [dst8_reg+ stride_reg*4-4]
%endif
%endif

%if mmsize == 8
.next8px
%endif
    ; read
    lea dst2_reg, [dst_reg + stride_reg]
%ifidn %2, v
%if %4 == 8 && mmsize == 16
%define movrow movh
%else
%define movrow mova
%endif
    movrow m0, [dst_reg +mstride_reg*4] ; p3
    movrow m1, [dst2_reg+mstride_reg*4] ; p2
    movrow m2, [dst_reg +mstride_reg*2] ; p1
    movrow m5, [dst2_reg] ; q1
    movrow m6, [dst2_reg+ stride_reg] ; q2
    movrow m7, [dst2_reg+ stride_reg*2] ; q3
%if mmsize == 16 && %4 == 8
    movhps m0, [dst8_reg+mstride_reg*4]
    movhps m2, [dst8_reg+mstride_reg*2]
    add dst8_reg, stride_reg
    movhps m1, [dst8_reg+mstride_reg*4]
    movhps m5, [dst8_reg]
    movhps m6, [dst8_reg+ stride_reg]
    movhps m7, [dst8_reg+ stride_reg*2]
    add dst8_reg, mstride_reg
%endif
%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu m0, [dst_reg +mstride_reg*4]
    movu m1, [dst2_reg+mstride_reg*4]
    movu m2, [dst_reg +mstride_reg*2]
    movu m3, [dst_reg +mstride_reg]
    movu m4, [dst_reg]
    movu m5, [dst2_reg]
    movu m6, [dst2_reg+ stride_reg]

    ; 8x8 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
    mova q0backup, m1
    movu m7, [dst2_reg+ stride_reg*2]
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1 ; p3/p2
    SBUTTERFLY dq, 2, 6, 1 ; q0/q1
    SBUTTERFLY dq, 3, 7, 1 ; q2/q3
    mova m1, q0backup
    mova q0backup, m2 ; store q0
    SBUTTERFLY dq, 1, 5, 2 ; p1/p0
    mova p0backup, m5 ; store p0
    SWAP 1, 4
    SWAP 2, 4
    SWAP 6, 3
    SWAP 5, 3
%else ; sse2 (h)
%if %4 == 16
    lea dst8_reg, [dst_reg + stride_reg*8]
%endif

    ; read 16 rows of 8px each, interleave
    movh m0, [dst_reg +mstride_reg*4]
    movh m1, [dst8_reg+mstride_reg*4]
    movh m2, [dst_reg +mstride_reg*2]
    movh m5, [dst8_reg+mstride_reg*2]
    movh m3, [dst_reg +mstride_reg]
    movh m6, [dst8_reg+mstride_reg]
    movh m4, [dst_reg]
    movh m7, [dst8_reg]
    punpcklbw m0, m1 ; A/I
    punpcklbw m2, m5 ; C/K
    punpcklbw m3, m6 ; D/L
    punpcklbw m4, m7 ; E/M
    add dst8_reg, stride_reg
    movh m1, [dst2_reg+mstride_reg*4]
    movh m6, [dst8_reg+mstride_reg*4]
    movh m5, [dst2_reg]
    movh m7, [dst8_reg]
    punpcklbw m1, m6 ; B/J
    punpcklbw m5, m7 ; F/N
    movh m6, [dst2_reg+ stride_reg]
    movh m7, [dst8_reg+ stride_reg]
    punpcklbw m6, m7 ; G/O

    ; 8x16 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
%ifdef m8
    SWAP 1, 8
%else
    mova q0backup, m1
%endif
    movh m7, [dst2_reg+ stride_reg*2]
    movh m1, [dst8_reg+ stride_reg*2]
    punpcklbw m7, m1 ; H/P
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1 ; p3/p2
    SBUTTERFLY dq, 2, 6, 1 ; q0/q1
    SBUTTERFLY dq, 3, 7, 1 ; q2/q3
%ifdef m8
    SWAP 1, 8
    SWAP 2, 8
%else
    mova m1, q0backup
    mova q0backup, m2 ; store q0
%endif
    SBUTTERFLY dq, 1, 5, 2 ; p1/p0
%ifdef m12
    SWAP 5, 12
%else
    mova p0backup, m5 ; store p0
%endif
    SWAP 1, 4
    SWAP 2, 4
    SWAP 6, 3
    SWAP 5, 3
%endif
    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
    mova m4, m1
    SWAP 4, 1
    psubusb m4, m0 ; p2-p3
    psubusb m0, m1 ; p3-p2
    por m0, m4 ; abs(p3-p2)

    mova m4, m2
    SWAP 4, 2
    psubusb m4, m1 ; p1-p2
    psubusb m1, m2 ; p2-p1
    por m1, m4 ; abs(p2-p1)

    mova m4, m6
    SWAP 4, 6
    psubusb m4, m7 ; q2-q3
    psubusb m7, m6 ; q3-q2
    por m7, m4 ; abs(q3-q2)

    mova m4, m5
    SWAP 4, 5
    psubusb m4, m6 ; q1-q2
    psubusb m6, m5 ; q2-q1
    por m6, m4 ; abs(q2-q1)

%ifidn %1, mmx
    mova m4, flim_I
    pxor m3, m3
    psubusb m0, m4
    psubusb m1, m4
    psubusb m7, m4
    psubusb m6, m4
    pcmpeqb m0, m3 ; abs(p3-p2) <= I
    pcmpeqb m1, m3 ; abs(p2-p1) <= I
    pcmpeqb m7, m3 ; abs(q3-q2) <= I
    pcmpeqb m6, m3 ; abs(q2-q1) <= I
    pand m0, m1
    pand m7, m6
    pand m0, m7
%else ; mmxext/sse2
    pmaxub m0, m1
    pmaxub m6, m7
    pmaxub m0, m6
%endif

    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP 7, 3 ; now m7 is zero
%ifidn %2, v
    movrow m3, [dst_reg +mstride_reg] ; p0
%if mmsize == 16 && %4 == 8
    movhps m3, [dst8_reg+mstride_reg]
%endif
%elifdef m12
    SWAP 3, 12
%else
    mova m3, p0backup
%endif

    mova m1, m2
    SWAP 1, 2
    mova m6, m3
    SWAP 3, 6
    psubusb m1, m3 ; p1-p0
    psubusb m6, m2 ; p0-p1
    por m1, m6 ; abs(p1-p0)
%ifidn %1, mmx
    mova m6, m1
    psubusb m1, m4
    psubusb m6, hev_thr
    pcmpeqb m1, m7 ; abs(p1-p0) <= I
    pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
    pand m0, m1
    mova mask_res, m6
%else ; mmxext/sse2
    pmaxub m0, m1 ; max_I
    SWAP 1, 4 ; max_hev_thresh
%endif

    SWAP 6, 4 ; now m6 is I
%ifidn %2, v
    movrow m4, [dst_reg] ; q0
%if mmsize == 16 && %4 == 8
    movhps m4, [dst8_reg]
%endif
%elifdef m8
    SWAP 4, 8
%else
    mova m4, q0backup
%endif

    mova m1, m4
    SWAP 1, 4
    mova m7, m5
    SWAP 7, 5
    psubusb m1, m5 ; q0-q1
    psubusb m7, m4 ; q1-q0
    por m1, m7 ; abs(q1-q0)
%ifidn %1, mmx
    mova m7, m1
    psubusb m1, m6
    psubusb m7, hev_thr
    pxor m6, m6
    pcmpeqb m1, m6 ; abs(q1-q0) <= I
    pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
    mova m6, mask_res
    pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
    pand m6, m7
%else ; mmxext/sse2
    pxor m7, m7
    pmaxub m0, m1
    pmaxub m6, m1
    psubusb m0, flim_I
    psubusb m6, hev_thr
    pcmpeqb m0, m7 ; max(abs(..)) <= I
    pcmpeqb m6, m7 ; !(max(abs..) > thresh)
%endif
%ifdef m12
    SWAP 6, 12
%else
    mova mask_res, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
%endif

    ; simple_limit
    mova m1, m3
    SWAP 1, 3
    mova m6, m4 ; keep copies of p0/q0 around for later use
    SWAP 6, 4
    psubusb m1, m4 ; p0-q0
    psubusb m6, m3 ; q0-p0
    por m1, m6 ; abs(q0-p0)
    paddusb m1, m1 ; m1=2*abs(q0-p0)

    mova m7, m2
    SWAP 7, 2
    mova m6, m5
    SWAP 6, 5
    psubusb m7, m5 ; p1-q1
    psubusb m6, m2 ; q1-p1
    por m7, m6 ; abs(q1-p1)
    pxor m6, m6
    pand m7, [pb_FE]
    psrlq m7, 1 ; abs(q1-p1)/2
    paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
    psubusb m7, flim_E
    pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand m0, m7 ; normal_limit result

    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
    mova m8, [pb_80]
%define pb_80_var m8
%else ; x86-32 or mmx/mmxext
%define pb_80_var [pb_80]
%endif
    mova m1, m4
    mova m7, m3
    pxor m1, pb_80_var
    pxor m7, pb_80_var
    psubsb m1, m7 ; (signed) q0-p0
    mova m6, m2
    mova m7, m5
    pxor m6, pb_80_var
    pxor m7, pb_80_var
    psubsb m6, m7 ; (signed) p1-q1
    mova m7, mask_res
    pandn m7, m6
    paddsb m7, m1
    paddsb m7, m1
    paddsb m7, m1 ; 3*(q0-p0)+is4tap?(p1-q1)
    pand m7, m0

    mova m1, [pb_F8]
    mova m6, m7
    paddsb m7, [pb_3]
    paddsb m6, [pb_4]
    pand m7, m1
    pand m6, m1

    pxor m1, m1
    pxor m0, m0
    pcmpgtb m1, m7
    psubb m0, m7
    psrlq m7, 3 ; +f2
    psrlq m0, 3 ; -f2
    pand m0, m1
    pandn m1, m7
    psubusb m3, m0
    paddusb m3, m1 ; p0+f2

    pxor m1, m1
    pxor m0, m0
    pcmpgtb m0, m6
    psubb m1, m6
    psrlq m6, 3 ; +f1
    psrlq m1, 3 ; -f1
    pand m1, m0
    pandn m0, m6
    psubusb m4, m0
    paddusb m4, m1 ; q0-f1

%ifdef m12
    SWAP 6, 12
%else
    mova m6, mask_res
%endif
%ifidn %1, mmx
    mova m7, [pb_1]
%else ; mmxext/sse2
    pxor m7, m7
%endif
    pand m0, m6
    pand m1, m6
%ifidn %1, mmx
    paddusb m0, m7
    pand m1, [pb_FE]
    pandn m7, m0
    psrlq m1, 1
    psrlq m7, 1
    SWAP 0, 7
%else ; mmxext/sse2
    psubusb m1, [pb_1]
    pavgb m0, m7 ; a
    pavgb m1, m7 ; -a
%endif
    psubusb m5, m0
    psubusb m2, m1
    paddusb m5, m1 ; q1-a
    paddusb m2, m0 ; p1+a

    ; store
%ifidn %2, v
    movrow [dst_reg +mstride_reg*2], m2
    movrow [dst_reg +mstride_reg ], m3
    movrow [dst_reg], m4
    movrow [dst_reg + stride_reg ], m5
%if mmsize == 16 && %4 == 8
    movhps [dst8_reg+mstride_reg*2], m2
    movhps [dst8_reg+mstride_reg ], m3
    movhps [dst8_reg], m4
    movhps [dst8_reg+ stride_reg ], m5
%endif
%else ; h
    add dst_reg, 2
    add dst2_reg, 2

    ; 4x8/16 transpose
    TRANSPOSE4x4B 2, 3, 4, 5, 6
%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D 2, 3, 4, 5, dst_reg, dst2_reg, mstride_reg, stride_reg
%else ; sse2 (h)
    lea dst8_reg, [dst8_reg+mstride_reg+2]
    WRITE_4x4D 2, 3, 4, 5, dst_reg, dst2_reg, dst8_reg, mstride_reg, stride_reg, %4
%endif
%endif

%if mmsize == 8
%if %4 == 8 ; chroma
%ifidn %2, h
    sub dst_reg, 2
%endif
    cmp dst_reg, dst8_reg
    mov dst_reg, dst8_reg
    jnz .next8px
%else
%ifidn %2, h
    lea dst_reg, [dst_reg + stride_reg*8-2]
%else ; v
    add dst_reg, 8
%endif
    dec cnt_reg
    jg .next8px
%endif
%endif

%ifndef m8 ; sse2 on x86-32 or mmx/mmxext
    mov rsp, stack_reg ; restore stack pointer
%endif
    RET
%endmacro
INIT_MMX
INNER_LOOPFILTER mmx, v, 6, 16, 0
INNER_LOOPFILTER mmx, h, 6, 16, 0
INNER_LOOPFILTER mmxext, v, 6, 16, 0
INNER_LOOPFILTER mmxext, h, 6, 16, 0
INNER_LOOPFILTER mmx, v, 6, 8, 0
INNER_LOOPFILTER mmx, h, 6, 8, 0
INNER_LOOPFILTER mmxext, v, 6, 8, 0
INNER_LOOPFILTER mmxext, h, 6, 8, 0
INIT_XMM
INNER_LOOPFILTER sse2, v, 5, 16, 13
%ifdef m8
INNER_LOOPFILTER sse2, h, 5, 16, 13
%else
INNER_LOOPFILTER sse2, h, 6, 16, 13
%endif
INNER_LOOPFILTER sse2, v, 6, 8, 13
INNER_LOOPFILTER sse2, h, 6, 8, 13
INNER_LOOPFILTER ssse3, v, 5, 16, 13
%ifdef m8
INNER_LOOPFILTER ssse3, h, 5, 16, 13
%else
INNER_LOOPFILTER ssse3, h, 6, 16, 13
%endif
INNER_LOOPFILTER ssse3, v, 6, 8, 13
INNER_LOOPFILTER ssse3, h, 6, 8, 13
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                             int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
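
; Editor's outline (hedged; the arithmetic continues past this excerpt):
; after the same normal_limit/hev masking as the inner filter, hev columns
; get the usual f1/f2 adjustment of p0/q0, while non-hev columns use the
; wider correction w = clip_int8(3*(q0-p0) + (p1-q1)), applied as
; (27*w+63)>>7 to p0/q0, (18*w+63)>>7 to p1/q1 and (9*w+63)>>7 to p2/q2 --
; hence the pw_27/pw_18/pw_9/pw_63 constants imported above.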
; write 4 or 8 words in the mmx/xmm registers as 8 lines
; 1 and 2 are the registers to write; these can be the same (for SSE2)
; for pre-SSE4:
; 3 is a general-purpose register that we will clobber
; for SSE4:
; 3 is a pointer to the destination's 5th line
; 4 is a pointer to the destination's 4th line
; 5/6 is -stride and +stride
; 7 is optimization string
%macro WRITE_8W 7
%ifidn %7, sse4
    pextrw [%4+%5*4], %1, 0
    pextrw [%3+%5*4], %1, 1
    pextrw [%4+%5*2], %1, 2
    pextrw [%4+%5 ], %1, 3
    pextrw [%4 ], %1, 4
    pextrw [%3 ], %1, 5
    pextrw [%3+%6 ], %1, 6
    pextrw [%3+%6*2], %1, 7
%else
    movd %3, %1
%if mmsize == 8
    punpckhdq %1, %1
%else
    psrldq %1, 4
%endif
    mov [%4+%5*4], %3w
    shr %3, 16
    add %4, %6
    mov [%4+%5*4], %3w
    movd %3, %1
%if mmsize == 16
    psrldq %1, 4
%endif
    add %4, %5
    mov [%4+%5*2], %3w
    shr %3, 16
    mov [%4+%5 ], %3w
    movd %3, %2
%if mmsize == 8
    punpckhdq %2, %2
%else
    psrldq %2, 4
%endif
    mov [%4 ], %3w
    shr %3, 16
    mov [%4+%6 ], %3w
    movd %3, %2
    add %4, %6
    mov [%4+%6 ], %3w
    shr %3, 16
    mov [%4+%6*2], %3w
%if mmsize == 8
    add %4, %5
%endif
%endif
%endmacro
%macro MBEDGE_LOOPFILTER 5
%if %4 == 8 ; chroma
cglobal vp8_%2_loop_filter8uv_mbedge_%1, 6, %3, %5
%define dst8_reg    r1
%define mstride_reg r2
%define E_reg       r3
%define I_reg       r4
%define hev_thr_reg r5
%else ; luma
cglobal vp8_%2_loop_filter16y_mbedge_%1, 5, %3, %5
%define mstride_reg r1
%define E_reg       r2
%define I_reg       r3
%define hev_thr_reg r4
%ifdef m8 ; x86-64, sse2
%define dst8_reg    r4
%elif mmsize == 16 ; x86-32, sse2
%define dst8_reg    r5
%else ; x86-32, mmx/mmxext
%define cnt_reg     r5
%endif
%endif
%define dst_reg     r0
%define stride_reg  E_reg
%define dst2_reg    I_reg
%ifndef m8
%define stack_reg   hev_thr_reg
%endif

%ifidn %1, ssse3
    pxor             m7, m7
%endif
%ifndef m8 ; mmx/mmxext or sse2 on x86-32
    ; splat function arguments
    SPLATB_REG       m0, E_reg,       %1, m7 ; E
    SPLATB_REG       m1, I_reg,       %1, m7 ; I
    SPLATB_REG       m2, hev_thr_reg, %1, m7 ; hev_thresh

    ; align stack
    mov       stack_reg, rsp          ; backup stack pointer
    and             rsp, ~(mmsize-1)  ; align stack
    sub             rsp, mmsize * 8   ; stack layout: [0]=E, [1]=I, [2]=hev_thr
                                      ;               [3]=hev() result
                                      ;               [4]=filter tmp result
                                      ;               [5]/[6] = p2/q2 backup
                                      ;               [7]=lim_res sign result

%define flim_E   [rsp]
%define flim_I   [rsp+mmsize]
%define hev_thr  [rsp+mmsize*2]
%define mask_res [rsp+mmsize*3]
%define lim_res  [rsp+mmsize*4]
%define p0backup [rsp+mmsize*3]
%define q0backup [rsp+mmsize*4]
%define p2backup [rsp+mmsize*5]
%define q2backup [rsp+mmsize*6]
%define lim_sign [rsp+mmsize*7]

    mova         flim_E, m0
    mova         flim_I, m1
    mova        hev_thr, m2
%else ; sse2 on x86-64
%define flim_E   m9
%define flim_I   m10
%define hev_thr  m11
%define mask_res m12
%define lim_res  m8
%define p0backup m12
%define q0backup m8
%define p2backup m13
%define q2backup m14
%define lim_sign m15

    ; splat function arguments
    SPLATB_REG   flim_E,  E_reg,       %1, m7 ; E
    SPLATB_REG   flim_I,  I_reg,       %1, m7 ; I
    SPLATB_REG   hev_thr, hev_thr_reg, %1, m7 ; hev_thresh
%endif

%if mmsize == 8 && %4 == 16 ; mmx/mmxext
    mov         cnt_reg, 2
%endif
    mov      stride_reg, mstride_reg
    neg     mstride_reg
%ifidn %2, h
    lea         dst_reg, [dst_reg + stride_reg*4-4]
%if %4 == 8
    lea        dst8_reg, [dst8_reg+ stride_reg*4-4]
%endif
%endif

%if mmsize == 8
.next8px
%endif
    ; read
    lea        dst2_reg, [dst_reg + stride_reg]
%ifidn %2, v
%if %4 == 8 && mmsize == 16
%define movrow movh
%else
%define movrow mova
%endif
    movrow           m0, [dst_reg +mstride_reg*4] ; p3
    movrow           m1, [dst2_reg+mstride_reg*4] ; p2
    movrow           m2, [dst_reg +mstride_reg*2] ; p1
    movrow           m5, [dst2_reg]               ; q1
    movrow           m6, [dst2_reg+ stride_reg]   ; q2
    movrow           m7, [dst2_reg+ stride_reg*2] ; q3
%if mmsize == 16 && %4 == 8
    movhps           m0, [dst8_reg+mstride_reg*4]
    movhps           m2, [dst8_reg+mstride_reg*2]
    add        dst8_reg, stride_reg
    movhps           m1, [dst8_reg+mstride_reg*4]
    movhps           m5, [dst8_reg]
    movhps           m6, [dst8_reg+ stride_reg]
    movhps           m7, [dst8_reg+ stride_reg*2]
    add        dst8_reg, mstride_reg
%endif
%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu             m0, [dst_reg +mstride_reg*4]
    movu             m1, [dst2_reg+mstride_reg*4]
    movu             m2, [dst_reg +mstride_reg*2]
    movu             m3, [dst_reg +mstride_reg]
    movu             m4, [dst_reg]
    movu             m5, [dst2_reg]
    movu             m6, [dst2_reg+ stride_reg]

    ; 8x8 transpose
    TRANSPOSE4x4B     0, 1, 2, 3, 7
    mova       q0backup, m1
    movu             m7, [dst2_reg+ stride_reg*2]
    TRANSPOSE4x4B     4, 5, 6, 7, 1
    SBUTTERFLY       dq, 0, 4, 1     ; p3/p2
    SBUTTERFLY       dq, 2, 6, 1     ; q0/q1
    SBUTTERFLY       dq, 3, 7, 1     ; q2/q3
    mova             m1, q0backup
    mova       q0backup, m2          ; store q0
    SBUTTERFLY       dq, 1, 5, 2     ; p1/p0
    mova       p0backup, m5          ; store p0
    SWAP              1, 4
    SWAP              2, 4
    SWAP              6, 3
    SWAP              5, 3
%else ; sse2 (h)
%if %4 == 16
    lea        dst8_reg, [dst_reg + stride_reg*8]
%endif

    ; read 16 rows of 8px each, interleave
    movh             m0, [dst_reg +mstride_reg*4]
    movh             m1, [dst8_reg+mstride_reg*4]
    movh             m2, [dst_reg +mstride_reg*2]
    movh             m5, [dst8_reg+mstride_reg*2]
    movh             m3, [dst_reg +mstride_reg]
    movh             m6, [dst8_reg+mstride_reg]
    movh             m4, [dst_reg]
    movh             m7, [dst8_reg]
    punpcklbw        m0, m1          ; A/I
    punpcklbw        m2, m5          ; C/K
    punpcklbw        m3, m6          ; D/L
    punpcklbw        m4, m7          ; E/M
    add        dst8_reg, stride_reg
    movh             m1, [dst2_reg+mstride_reg*4]
    movh             m6, [dst8_reg+mstride_reg*4]
    movh             m5, [dst2_reg]
    movh             m7, [dst8_reg]
    punpcklbw        m1, m6          ; B/J
    punpcklbw        m5, m7          ; F/N
    movh             m6, [dst2_reg+ stride_reg]
    movh             m7, [dst8_reg+ stride_reg]
    punpcklbw        m6, m7          ; G/O

    ; 8x16 transpose
    TRANSPOSE4x4B     0, 1, 2, 3, 7
%ifdef m8
    SWAP              1, 8
%else
    mova       q0backup, m1
%endif
    movh             m7, [dst2_reg+ stride_reg*2]
    movh             m1, [dst8_reg+ stride_reg*2]
    punpcklbw        m7, m1          ; H/P
    TRANSPOSE4x4B     4, 5, 6, 7, 1
    SBUTTERFLY       dq, 0, 4, 1     ; p3/p2
    SBUTTERFLY       dq, 2, 6, 1     ; q0/q1
    SBUTTERFLY       dq, 3, 7, 1     ; q2/q3
%ifdef m8
    SWAP              1, 8
    SWAP              2, 8
%else
    mova             m1, q0backup
    mova       q0backup, m2          ; store q0
%endif
    SBUTTERFLY       dq, 1, 5, 2     ; p1/p0
%ifdef m12
    SWAP              5, 12
%else
    mova       p0backup, m5          ; store p0
%endif
    SWAP              1, 4
    SWAP              2, 4
    SWAP              6, 3
    SWAP              5, 3
%endif

    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
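    ; i.e. build the mask (abs(p3-p2) <= I) && (abs(p2-p1) <= I) &&
    ;                     (abs(q3-q2) <= I) && (abs(q2-q1) <= I);
    ; abs() of unsigned bytes is done as psubusb(a,b) | psubusb(b,a)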
    mova             m4, m1
    SWAP              4, 1
    psubusb          m4, m0          ; p2-p3
    psubusb          m0, m1          ; p3-p2
    por              m0, m4          ; abs(p3-p2)

    mova             m4, m2
    SWAP              4, 2
    psubusb          m4, m1          ; p1-p2
    mova       p2backup, m1
    psubusb          m1, m2          ; p2-p1
    por              m1, m4          ; abs(p2-p1)

    mova             m4, m6
    SWAP              4, 6
    psubusb          m4, m7          ; q2-q3
    psubusb          m7, m6          ; q3-q2
    por              m7, m4          ; abs(q3-q2)

    mova             m4, m5
    SWAP              4, 5
    psubusb          m4, m6          ; q1-q2
    mova       q2backup, m6
    psubusb          m6, m5          ; q2-q1
    por              m6, m4          ; abs(q2-q1)

%ifidn %1, mmx
    mova             m4, flim_I
    pxor             m3, m3
    psubusb          m0, m4
    psubusb          m1, m4
    psubusb          m7, m4
    psubusb          m6, m4
    pcmpeqb          m0, m3          ; abs(p3-p2) <= I
    pcmpeqb          m1, m3          ; abs(p2-p1) <= I
    pcmpeqb          m7, m3          ; abs(q3-q2) <= I
    pcmpeqb          m6, m3          ; abs(q2-q1) <= I
    pand             m0, m1
    pand             m7, m6
    pand             m0, m7
%else ; mmxext/sse2
    pmaxub           m0, m1
    pmaxub           m6, m7
    pmaxub           m0, m6
%endif

    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP              7, 3           ; now m7 is zero
%ifidn %2, v
    movrow           m3, [dst_reg +mstride_reg] ; p0
%if mmsize == 16 && %4 == 8
    movhps           m3, [dst8_reg+mstride_reg]
%endif
%elifdef m12
    SWAP              3, 12
%else
    mova             m3, p0backup
%endif

    mova             m1, m2
    SWAP              1, 2
    mova             m6, m3
    SWAP              3, 6
    psubusb          m1, m3          ; p1-p0
    psubusb          m6, m2          ; p0-p1
    por              m1, m6          ; abs(p1-p0)
%ifidn %1, mmx
    mova             m6, m1
    psubusb          m1, m4
    psubusb          m6, hev_thr
    pcmpeqb          m1, m7          ; abs(p1-p0) <= I
    pcmpeqb          m6, m7          ; abs(p1-p0) <= hev_thresh
    pand             m0, m1
    mova       mask_res, m6
%else ; mmxext/sse2
    pmaxub           m0, m1          ; max_I
    SWAP              1, 4           ; max_hev_thresh
%endif
    SWAP              6, 4           ; now m6 is I
%ifidn %2, v
    movrow           m4, [dst_reg]   ; q0
%if mmsize == 16 && %4 == 8
    movhps           m4, [dst8_reg]
%endif
%elifdef m8
    SWAP              4, 8
%else
    mova             m4, q0backup
%endif

    mova             m1, m4
    SWAP              1, 4
    mova             m7, m5
    SWAP              7, 5
    psubusb          m1, m5          ; q0-q1
    psubusb          m7, m4          ; q1-q0
    por              m1, m7          ; abs(q1-q0)
%ifidn %1, mmx
    mova             m7, m1
    psubusb          m1, m6
    psubusb          m7, hev_thr
    pxor             m6, m6
    pcmpeqb          m1, m6          ; abs(q1-q0) <= I
    pcmpeqb          m7, m6          ; abs(q1-q0) <= hev_thresh
    mova             m6, mask_res
    pand             m0, m1          ; abs([pq][321]-[pq][210]) <= I
    pand             m6, m7
%else ; mmxext/sse2
    pxor             m7, m7
    pmaxub           m0, m1
    pmaxub           m6, m1
    psubusb          m0, flim_I
    psubusb          m6, hev_thr
    pcmpeqb          m0, m7          ; max(abs(..)) <= I
    pcmpeqb          m6, m7          ; !(max(abs..) > thresh)
%endif
%ifdef m12
    SWAP              6, 12
%else
    mova       mask_res, m6          ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
%endif

    ; simple_limit
    mova             m1, m3
    SWAP              1, 3
    mova             m6, m4          ; keep copies of p0/q0 around for later use
    SWAP              6, 4
    psubusb          m1, m4          ; p0-q0
    psubusb          m6, m3          ; q0-p0
    por              m1, m6          ; abs(q0-p0)
    paddusb          m1, m1          ; m1=2*abs(q0-p0)

    mova             m7, m2
    SWAP              7, 2
    mova             m6, m5
    SWAP              6, 5
    psubusb          m7, m5          ; p1-q1
    psubusb          m6, m2          ; q1-p1
    por              m7, m6          ; abs(q1-p1)
    pxor             m6, m6
    pand             m7, [pb_FE]
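    ; clearing each byte's LSB first makes the qword shift below a per-byte >>1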
    psrlq            m7, 1           ; abs(q1-p1)/2
    paddusb          m7, m1          ; abs(q0-p0)*2+abs(q1-p1)/2
    psubusb          m7, flim_E
    pcmpeqb          m7, m6          ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand             m0, m7          ; normal_limit result

    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
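    ; scalar sketch: on hev lanes, f = clamp(3*(q0-p0) + clamp(p1-q1)), then
    ; p0 += (f+3)>>3 and q0 -= (f+4)>>3; non-hev lanes instead keep w in
    ; lim_res for filter_mbedge below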
%ifdef m8 ; x86-64 && sse2
    mova             m8, [pb_80]
%define pb_80_var    m8
%else ; x86-32 or mmx/mmxext
%define pb_80_var    [pb_80]
%endif
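    ; xor with 0x80 biases the unsigned pixels into signed range, so psubsb
    ; yields a correctly saturated signed difference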
    mova             m1, m4
    mova             m7, m3
    pxor             m1, pb_80_var
    pxor             m7, pb_80_var
    psubsb           m1, m7          ; (signed) q0-p0
    mova             m6, m2
    mova             m7, m5
    pxor             m6, pb_80_var
    pxor             m7, pb_80_var
    psubsb           m6, m7          ; (signed) p1-q1
    mova             m7, mask_res
    paddsb           m6, m1
    paddsb           m6, m1
    paddsb           m6, m1
    pand             m6, m0
%ifdef m8
    mova        lim_res, m6          ; 3*(q0-p0)+(p1-q1) masked for filter_mbedge
    pand        lim_res, m7
%else
    mova             m0, m6
    pand             m0, m7
    mova        lim_res, m0
%endif
    pandn            m7, m6          ; 3*(q0-p0)+(p1-q1) masked for filter_common
    mova             m1, [pb_F8]
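    ; pb_F8 zeroes each byte's low 3 bits so the qword psrlq by 3 acts per byte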
    mova             m6, m7
    paddsb           m7, [pb_3]
    paddsb           m6, [pb_4]
    pand             m7, m1
    pand             m6, m1

    pxor             m1, m1
    pxor             m0, m0
    pcmpgtb          m1, m7
    psubb            m0, m7
    psrlq            m7, 3           ; +f2
    psrlq            m0, 3           ; -f2
    pand             m0, m1
    pandn            m1, m7
    psubusb          m3, m0
    paddusb          m3, m1          ; p0+f2

    pxor             m1, m1
    pxor             m0, m0
    pcmpgtb          m0, m6
    psubb            m1, m6
    psrlq            m6, 3           ; +f1
    psrlq            m1, 3           ; -f1
    pand             m1, m0
    pandn            m0, m6
    psubusb          m4, m0
    paddusb          m4, m1          ; q0-f1

    ; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)
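    ; the three multiply/round passes below compute a0/a1/a2 = (27/18/9*w+63)>>7
    ; on the word-expanded halves and apply them to p0/q0, p1/q1 and p2/q2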
    mova             m7, [pw_63]
%ifdef m8
    SWAP              1, 8
%else
    mova             m1, lim_res
%endif
    pxor             m0, m0
    mova             m6, m1
    pcmpgtb          m0, m1          ; which are negative
    punpcklbw        m6, m0          ; signed byte->word
    punpckhbw        m1, m0
    mova       lim_sign, m0
    mova       mask_res, m6          ; backup for later in filter
    mova        lim_res, m1
    pmullw           m6, [pw_27]
    pmullw           m1, [pw_27]
    paddw            m6, m7
    paddw            m1, m7
    psraw            m6, 7
    psraw            m1, 7
    packsswb         m6, m1          ; a0
    pxor             m1, m1
    psubb            m1, m6
    pand             m1, m0          ; -a0
    pandn            m0, m6          ; +a0
    psubusb          m3, m1
    paddusb          m4, m1
    paddusb          m3, m0          ; p0+a0
    psubusb          m4, m0          ; q0-a0

    mova             m6, mask_res
    mova             m1, lim_res
    mova             m0, lim_sign
    pmullw           m6, [pw_18]
    pmullw           m1, [pw_18]
    paddw            m6, m7
    paddw            m1, m7
    psraw            m6, 7
    psraw            m1, 7
    packsswb         m6, m1          ; a1
    pxor             m1, m1
    psubb            m1, m6
    pand             m1, m0          ; -a1
    pandn            m0, m6          ; +a1
    psubusb          m2, m1
    paddusb          m5, m1
    paddusb          m2, m0          ; p1+a1
    psubusb          m5, m0          ; q1-a1

%ifdef m8
    SWAP              6, 12
    SWAP              1, 8
%else
    mova             m6, mask_res
    mova             m1, lim_res
%endif
    pmullw           m6, [pw_9]
    pmullw           m1, [pw_9]
    paddw            m6, m7
    paddw            m1, m7
%ifdef m15
    SWAP              7, 15
%else
    mova             m7, lim_sign
%endif
    psraw            m6, 7
    psraw            m1, 7
    packsswb         m6, m1          ; a2
    pxor             m0, m0
    psubb            m0, m6
    pand             m0, m7          ; -a2
    pandn            m7, m6          ; +a2
%ifdef m8
    SWAP              1, 13
    SWAP              6, 14
%else
    mova             m1, p2backup
    mova             m6, q2backup
%endif
    psubusb          m1, m0
    paddusb          m6, m0
    paddusb          m1, m7          ; p2+a2
    psubusb          m6, m7          ; q2-a2
    ; store
%ifidn %2, v
    movrow [dst2_reg+mstride_reg*4], m1
    movrow [dst_reg +mstride_reg*2], m2
    movrow [dst_reg +mstride_reg  ], m3
    movrow [dst_reg               ], m4
    movrow [dst2_reg              ], m5
    movrow [dst2_reg+ stride_reg  ], m6
%if mmsize == 16 && %4 == 8
    add        dst8_reg, mstride_reg
    movhps [dst8_reg+mstride_reg*2], m1
    movhps [dst8_reg+mstride_reg  ], m2
    movhps [dst8_reg              ], m3
    add        dst8_reg, stride_reg
    movhps [dst8_reg              ], m4
    movhps [dst8_reg+ stride_reg  ], m5
    movhps [dst8_reg+ stride_reg*2], m6
%endif
%else ; h
    inc         dst_reg
    inc        dst2_reg

    ; 4x8/16 transpose
    TRANSPOSE4x4B     1, 2, 3, 4, 0
    SBUTTERFLY       bw, 5, 6, 0

%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D        1, 2, 3, 4, dst_reg, dst2_reg, mstride_reg, stride_reg
    add         dst_reg, 4
    WRITE_8W         m5, m6, dst2_reg, dst_reg, mstride_reg, stride_reg, %4
%else ; sse2 (h)
    lea        dst8_reg, [dst8_reg+mstride_reg+1]
    WRITE_4x4D        1, 2, 3, 4, dst_reg, dst2_reg, dst8_reg, mstride_reg, stride_reg, %4
    lea         dst_reg, [dst2_reg+mstride_reg+4]
    lea        dst8_reg, [dst8_reg+mstride_reg+4]
    WRITE_8W         m5, m5, dst2_reg, dst_reg,  mstride_reg, stride_reg, %1
%ifidn %1, sse4
    lea         dst_reg, [dst8_reg+ stride_reg]
%endif
    WRITE_8W         m6, m6, dst2_reg, dst8_reg, mstride_reg, stride_reg, %1
%endif
%endif

%if mmsize == 8
%if %4 == 8 ; chroma
%ifidn %2, h
    sub         dst_reg, 5
%endif
    cmp         dst_reg, dst8_reg
    mov         dst_reg, dst8_reg
    jnz .next8px
%else
%ifidn %2, h
    lea         dst_reg, [dst_reg + stride_reg*8-5]
%else ; v
    add         dst_reg, 8
%endif
    dec         cnt_reg
    jg .next8px
%endif
%endif

%ifndef m8 ; sse2 on x86-32 or mmx/mmxext
    mov             rsp, stack_reg   ; restore stack pointer
%endif
    RET
%endmacro
INIT_MMX
MBEDGE_LOOPFILTER mmx,    v, 6, 16, 0
MBEDGE_LOOPFILTER mmx,    h, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, v, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, h, 6, 16, 0
MBEDGE_LOOPFILTER mmx,    v, 6,  8, 0
MBEDGE_LOOPFILTER mmx,    h, 6,  8, 0
MBEDGE_LOOPFILTER mmxext, v, 6,  8, 0
MBEDGE_LOOPFILTER mmxext, h, 6,  8, 0

INIT_XMM
MBEDGE_LOOPFILTER sse2,   v, 5, 16, 16
%ifdef m8
MBEDGE_LOOPFILTER sse2,   h, 5, 16, 16
%else
MBEDGE_LOOPFILTER sse2,   h, 6, 16, 16
%endif
MBEDGE_LOOPFILTER sse2,   v, 6,  8, 16
MBEDGE_LOOPFILTER sse2,   h, 6,  8, 16

MBEDGE_LOOPFILTER ssse3,  v, 5, 16, 16
%ifdef m8
MBEDGE_LOOPFILTER ssse3,  h, 5, 16, 16
%else
MBEDGE_LOOPFILTER ssse3,  h, 6, 16, 16
%endif
MBEDGE_LOOPFILTER ssse3,  v, 6,  8, 16
MBEDGE_LOOPFILTER ssse3,  h, 6,  8, 16

%ifdef m8
MBEDGE_LOOPFILTER sse4,   h, 5, 16, 16
%else
MBEDGE_LOOPFILTER sse4,   h, 6, 16, 16
%endif
MBEDGE_LOOPFILTER sse4,   h, 6,  8, 16