;******************************************************************************
;* VP8 MMXEXT optimizations
;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION_RODATA

fourtap_filter_hw_m: times 4 dw -6, 123
times 4 dw 12, -1
times 4 dw -9, 93
times 4 dw 50, -6
times 4 dw -6, 50
times 4 dw 93, -9
times 4 dw -1, 12
times 4 dw 123, -6

sixtap_filter_hw_m: times 4 dw 2, -11
times 4 dw 108, 36
times 4 dw -8, 1
times 4 dw 3, -16
times 4 dw 77, 77
times 4 dw -16, 3
times 4 dw 1, -8
times 4 dw 36, 108
times 4 dw -11, 2

fourtap_filter_hb_m: times 8 db -6, 123
times 8 db 12, -1
times 8 db -9, 93
times 8 db 50, -6
times 8 db -6, 50
times 8 db 93, -9
times 8 db -1, 12
times 8 db 123, -6

sixtap_filter_hb_m: times 8 db 2, 1
times 8 db -11, 108
times 8 db 36, -8
times 8 db 3, 3
times 8 db -16, 77
times 8 db 77, -16
times 8 db 1, 2
times 8 db -8, 36
times 8 db 108, -11

fourtap_filter_v_m: times 8 dw -6
times 8 dw 123
times 8 dw 12
times 8 dw -1
times 8 dw -9
times 8 dw 93
times 8 dw 50
times 8 dw -6
times 8 dw -6
times 8 dw 50
times 8 dw 93
times 8 dw -9
times 8 dw -1
times 8 dw 12
times 8 dw 123
times 8 dw -6

sixtap_filter_v_m: times 8 dw 2
times 8 dw -11
times 8 dw 108
times 8 dw 36
times 8 dw -8
times 8 dw 1
times 8 dw 3
times 8 dw -16
times 8 dw 77
times 8 dw 77
times 8 dw -16
times 8 dw 3
times 8 dw 1
times 8 dw -8
times 8 dw 36
times 8 dw 108
times 8 dw -11
times 8 dw 2

bilinear_filter_vw_m: times 8 dw 1
times 8 dw 2
times 8 dw 3
times 8 dw 4
times 8 dw 5
times 8 dw 6
times 8 dw 7

bilinear_filter_vb_m: times 8 db 7, 1
times 8 db 6, 2
times 8 db 5, 3
times 8 db 4, 4
times 8 db 3, 5
times 8 db 2, 6
times 8 db 1, 7

%ifdef PIC
%define fourtap_filter_hw r11
%define sixtap_filter_hw r11
%define fourtap_filter_hb r11
%define sixtap_filter_hb r11
%define fourtap_filter_v r11
%define sixtap_filter_v r11
%define bilinear_filter_vw r11
%define bilinear_filter_vb r11
%else
%define fourtap_filter_hw fourtap_filter_hw_m
%define sixtap_filter_hw sixtap_filter_hw_m
%define fourtap_filter_hb fourtap_filter_hb_m
%define sixtap_filter_hb sixtap_filter_hb_m
%define fourtap_filter_v fourtap_filter_v_m
%define sixtap_filter_v sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
%endif
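; on PIC builds the tables above cannot be addressed absolutely, so each user
; first points r11 at the _m label with a lea and the unsuffixed names resolve
; to r11-relative addresses; non-PIC builds reference the labels directly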
filter_h2_shuf: db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf: db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11

pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734

pb_27_63: times 8 db 27, 63
pb_18_63: times 8 db 18, 63
pb_9_63: times 8 db 9, 63

cextern pb_1
cextern pw_3
cextern pb_3
cextern pw_4
cextern pb_4
cextern pw_9
cextern pw_18
cextern pw_27
cextern pw_63
cextern pw_64
cextern pb_80
cextern pb_F8
cextern pb_FE
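; constants shared with other FFmpeg x86 assembly files; cextern declares
; them as external symbols with the platform's name mangling applied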

SECTION .text

;-----------------------------------------------------------------------------
; subpel MC functions:
;
; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
;                                              uint8_t *src, int srcstride,
;                                              int height, int mx, int my);
;-----------------------------------------------------------------------------
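; As a reference, the 6-tap case computes per output pixel (illustrative
; C-style sketch):
;   sum = F[0]*src[x-2] + F[1]*src[x-1] + F[2]*src[x]
;       + F[3]*src[x+1] + F[4]*src[x+2] + F[5]*src[x+3];
;   dst[x] = clip_uint8((sum + 64) >> 7);
; which is what the pmadd/paddsw chains, paddsw [pw_64], psraw 7 and
; packuswb below implement.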
%macro FILTER_SSSE3 3
cglobal put_vp8_epel%1_h6_ssse3, 6, 6, %2
    lea r5d, [r5*3]
    mova m3, [filter_h6_shuf2]
    mova m4, [filter_h6_shuf3]
%ifdef PIC
    lea r11, [sixtap_filter_hb_m]
%endif
    mova m5, [sixtap_filter_hb+r5*8-48] ; set up 6tap filter in bytes
    mova m6, [sixtap_filter_hb+r5*8-32]
    mova m7, [sixtap_filter_hb+r5*8-16]
.nextrow
    movu m0, [r2-2]
    mova m1, m0
    mova m2, m0
%ifidn %1, 4
; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
; shuffle with a memory operand
    punpcklbw m0, [r2+3]
%else
    pshufb m0, [filter_h6_shuf1]
%endif
    pshufb m1, m3
    pshufb m2, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    pmaddubsw m2, m7
    paddsw m0, m1
    paddsw m0, m2
    paddsw m0, [pw_64]
    psraw m0, 7
    packuswb m0, m0
    movh [r0], m0 ; store
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_h4_ssse3, 6, 6, %3
    shl r5d, 4
    mova m2, [pw_64]
    mova m3, [filter_h2_shuf]
    mova m4, [filter_h4_shuf]
%ifdef PIC
    lea r11, [fourtap_filter_hb_m]
%endif
    mova m5, [fourtap_filter_hb+r5-16] ; set up 4tap filter in bytes
    mova m6, [fourtap_filter_hb+r5]
.nextrow
    movu m0, [r2-1]
    mova m1, m0
    pshufb m0, m3
    pshufb m1, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    paddsw m0, m2
    paddsw m0, m1
    psraw m0, 7
    packuswb m0, m0
    movh [r0], m0 ; store
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v4_ssse3, 7, 7, %2
    shl r6d, 4
%ifdef PIC
    lea r11, [fourtap_filter_hb_m]
%endif
    mova m5, [fourtap_filter_hb+r6-16]
    mova m6, [fourtap_filter_hb+r6]
    mova m7, [pw_64]
; read 3 lines
    sub r2, r3
    movh m0, [r2]
    movh m1, [r2+ r3]
    movh m2, [r2+2*r3]
    add r2, r3
.nextrow
    movh m3, [r2+2*r3] ; read new row
    mova m4, m0
    mova m0, m1
    punpcklbw m4, m1
    mova m1, m2
    punpcklbw m2, m3
    pmaddubsw m4, m5
    pmaddubsw m2, m6
    paddsw m4, m2
    mova m2, m3
    paddsw m4, m7
    psraw m4, 7
    packuswb m4, m4
    movh [r0], m4
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v6_ssse3, 7, 7, %2
    lea r6d, [r6*3]
%ifdef PIC
    lea r11, [sixtap_filter_hb_m]
%endif
    lea r6, [sixtap_filter_hb+r6*8]
; read 5 lines
    sub r2, r3
    sub r2, r3
    movh m0, [r2]
    movh m1, [r2+r3]
    movh m2, [r2+r3*2]
    lea r2, [r2+r3*2]
    add r2, r3
    movh m3, [r2]
    movh m4, [r2+r3]
.nextrow
    movh m5, [r2+2*r3] ; read new row
    mova m6, m0
    punpcklbw m6, m5
    mova m0, m1
    punpcklbw m1, m2
    mova m7, m3
    punpcklbw m7, m4
    pmaddubsw m6, [r6-48]
    pmaddubsw m1, [r6-32]
    pmaddubsw m7, [r6-16]
    paddsw m6, m1
    paddsw m6, m7
    mova m1, m2
    paddsw m6, [pw_64]
    mova m2, m3
    psraw m6, 7
    mova m3, m4
    packuswb m6, m6
    mova m4, m5
    movh [r0], m6
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_SSSE3 4, 0, 0
INIT_XMM
FILTER_SSSE3 8, 8, 7
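; (INIT_MMX/INIT_XMM switch the register set and mmsize, so the macro above
; expands to a 4px-wide MMX-register version and an 8px-wide XMM version)
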
; 4x4 block, H-only 4-tap filter
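; (pshufw immediates pack four 2-bit word selectors, low word first: 0x94 =
; 10 01 01 00 picks words 0,1,1,2, i.e. "ABBC", pairing neighbours so one
; pmaddwd yields two 2-tap partial sums at once)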
cglobal put_vp8_epel4_h4_mmxext, 6, 6
    shl r5d, 4
%ifdef PIC
    lea r11, [fourtap_filter_hw_m]
%endif
    movq mm4, [fourtap_filter_hw+r5-16] ; set up 4tap filter in words
    movq mm5, [fourtap_filter_hw+r5]
    movq mm7, [pw_64]
    pxor mm6, mm6
.nextrow
    movq mm1, [r2-1] ; (ABCDEFGH) load 8 horizontal pixels
; first set of 2 pixels
    movq mm2, mm1 ; byte ABCD..
    punpcklbw mm1, mm6 ; byte->word ABCD
    pshufw mm0, mm2, 9 ; byte CDEF..
    punpcklbw mm0, mm6 ; byte->word CDEF
    pshufw mm3, mm1, 0x94 ; word ABBC
    pshufw mm1, mm0, 0x94 ; word CDDE
    pmaddwd mm3, mm4 ; multiply 2px with F0/F1
    movq mm0, mm1 ; backup for second set of pixels
    pmaddwd mm1, mm5 ; multiply 2px with F2/F3
    paddd mm3, mm1 ; finish 1st 2px
; second set of 2 pixels, use backup of above
    punpckhbw mm2, mm6 ; byte->word EFGH
    pmaddwd mm0, mm4 ; multiply backed up 2px with F0/F1
    pshufw mm1, mm2, 0x94 ; word EFFG
    pmaddwd mm1, mm5 ; multiply 2px with F2/F3
    paddd mm0, mm1 ; finish 2nd 2px
; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw mm3, mm0 ; merge dword->word (4px)
    paddsw mm3, mm7 ; rounding
    psraw mm3, 7
    packuswb mm3, mm6 ; clip and word->bytes
    movd [r0], mm3 ; store
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET

; 4x4 block, H-only 6-tap filter
cglobal put_vp8_epel4_h6_mmxext, 6, 6
    lea r5d, [r5*3]
%ifdef PIC
    lea r11, [sixtap_filter_hw_m]
%endif
    movq mm4, [sixtap_filter_hw+r5*8-48] ; set up 6tap filter in words
    movq mm5, [sixtap_filter_hw+r5*8-32]
    movq mm6, [sixtap_filter_hw+r5*8-16]
    movq mm7, [pw_64]
    pxor mm3, mm3
.nextrow
    movq mm1, [r2-2] ; (ABCDEFGH) load 8 horizontal pixels
; first set of 2 pixels
    movq mm2, mm1 ; byte ABCD..
    punpcklbw mm1, mm3 ; byte->word ABCD
    pshufw mm0, mm2, 0x9 ; byte CDEF..
    punpckhbw mm2, mm3 ; byte->word EFGH
    punpcklbw mm0, mm3 ; byte->word CDEF
    pshufw mm1, mm1, 0x94 ; word ABBC
    pshufw mm2, mm2, 0x94 ; word EFFG
    pmaddwd mm1, mm4 ; multiply 2px with F0/F1
    pshufw mm3, mm0, 0x94 ; word CDDE
    movq mm0, mm3 ; backup for second set of pixels
    pmaddwd mm3, mm5 ; multiply 2px with F2/F3
    paddd mm1, mm3 ; add to 1st 2px cache
    movq mm3, mm2 ; backup for second set of pixels
    pmaddwd mm2, mm6 ; multiply 2px with F4/F5
    paddd mm1, mm2 ; finish 1st 2px
; second set of 2 pixels, use backup of above
    movd mm2, [r2+3] ; byte FGHI (prevent overreads)
    pmaddwd mm0, mm4 ; multiply 1st backed up 2px with F0/F1
    pmaddwd mm3, mm5 ; multiply 2nd backed up 2px with F2/F3
    paddd mm0, mm3 ; add to 2nd 2px cache
    pxor mm3, mm3
    punpcklbw mm2, mm3 ; byte->word FGHI
    pshufw mm2, mm2, 0xE9 ; word GHHI
    pmaddwd mm2, mm6 ; multiply 2px with F4/F5
    paddd mm0, mm2 ; finish 2nd 2px
; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw mm1, mm0 ; merge dword->word (4px)
    paddsw mm1, mm7 ; rounding
    psraw mm1, 7
    packuswb mm1, mm3 ; clip and word->bytes
    movd [r0], mm1 ; store
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET
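
; The plain-SSE2 versions below have no pmaddubsw, so they keep the taps as
; words and use one pmullw per tap. On x86-64 (where m8 and up exist) all
; taps stay resident in registers; on x86-32 the upper taps are reloaded
; from memory inside the loop.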
INIT_XMM
cglobal put_vp8_epel8_h4_sse2, 6, 6, 10
    shl r5d, 5
%ifdef PIC
    lea r11, [fourtap_filter_v_m]
%endif
    lea r5, [fourtap_filter_v+r5-32]
    pxor m7, m7
    mova m4, [pw_64]
    mova m5, [r5+ 0]
    mova m6, [r5+16]
%ifdef m8
    mova m8, [r5+32]
    mova m9, [r5+48]
%endif
.nextrow
    movq m0, [r2-1]
    movq m1, [r2-0]
    movq m2, [r2+1]
    movq m3, [r2+2]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    pmullw m0, m5
    pmullw m1, m6
%ifdef m8
    pmullw m2, m8
    pmullw m3, m9
%else
    pmullw m2, [r5+32]
    pmullw m3, [r5+48]
%endif
    paddsw m0, m1
    paddsw m2, m3
    paddsw m0, m2
    paddsw m0, m4
    psraw m0, 7
    packuswb m0, m7
    movh [r0], m0 ; store
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel8_h6_sse2, 6, 6, 14
    lea r5d, [r5*3]
    shl r5d, 4
%ifdef PIC
    lea r11, [sixtap_filter_v_m]
%endif
    lea r5, [sixtap_filter_v+r5-96]
    pxor m7, m7
    mova m6, [pw_64]
%ifdef m8
    mova m8, [r5+ 0]
    mova m9, [r5+16]
    mova m10, [r5+32]
    mova m11, [r5+48]
    mova m12, [r5+64]
    mova m13, [r5+80]
%endif
.nextrow
    movq m0, [r2-2]
    movq m1, [r2-1]
    movq m2, [r2-0]
    movq m3, [r2+1]
    movq m4, [r2+2]
    movq m5, [r2+3]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    punpcklbw m4, m7
    punpcklbw m5, m7
%ifdef m8
    pmullw m0, m8
    pmullw m1, m9
    pmullw m2, m10
    pmullw m3, m11
    pmullw m4, m12
    pmullw m5, m13
%else
    pmullw m0, [r5+ 0]
    pmullw m1, [r5+16]
    pmullw m2, [r5+32]
    pmullw m3, [r5+48]
    pmullw m4, [r5+64]
    pmullw m5, [r5+80]
%endif
    paddsw m1, m4
    paddsw m0, m5
    paddsw m1, m2
    paddsw m0, m3
    paddsw m0, m1
    paddsw m0, m6
    psraw m0, 7
    packuswb m0, m7
    movh [r0], m0 ; store
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET
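
; The vertical filters below keep a sliding window of source rows in
; registers: each loop iteration loads exactly one new row and rotates the
; window with mova chains, so the other 3 (4-tap) or 5 (6-tap) rows are
; never re-read from memory.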
%macro FILTER_V 3
; 4x4 block, V-only 4-tap filter
cglobal put_vp8_epel%2_v4_%1, 7, 7, %3
    shl r6d, 5
%ifdef PIC
    lea r11, [fourtap_filter_v_m]
%endif
    lea r6, [fourtap_filter_v+r6-32]
    mova m6, [pw_64]
    pxor m7, m7
    mova m5, [r6+48]
; read 3 lines
    sub r2, r3
    movh m0, [r2]
    movh m1, [r2+ r3]
    movh m2, [r2+2*r3]
    add r2, r3
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
.nextrow
; first calculate negative taps (to prevent losing positive overflows)
    movh m4, [r2+2*r3] ; read new row
    punpcklbw m4, m7
    mova m3, m4
    pmullw m0, [r6+0]
    pmullw m4, m5
    paddsw m4, m0
; then calculate positive taps
    mova m0, m1
    pmullw m1, [r6+16]
    paddsw m4, m1
    mova m1, m2
    pmullw m2, [r6+32]
    paddsw m4, m2
    mova m2, m3
; round/clip/store
    paddsw m4, m6
    psraw m4, 7
    packuswb m4, m7
    movh [r0], m4
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET

; 4x4 block, V-only 6-tap filter
cglobal put_vp8_epel%2_v6_%1, 7, 7, %3
    shl r6d, 4
    lea r6, [r6*3]
%ifdef PIC
    lea r11, [sixtap_filter_v_m]
%endif
    lea r6, [sixtap_filter_v+r6-96]
    pxor m7, m7
; read 5 lines
    sub r2, r3
    sub r2, r3
    movh m0, [r2]
    movh m1, [r2+r3]
    movh m2, [r2+r3*2]
    lea r2, [r2+r3*2]
    add r2, r3
    movh m3, [r2]
    movh m4, [r2+r3]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    punpcklbw m4, m7
.nextrow
; first calculate negative taps (to prevent losing positive overflows)
    mova m5, m1
    pmullw m5, [r6+16]
    mova m6, m4
    pmullw m6, [r6+64]
    paddsw m6, m5
; then calculate positive taps
    movh m5, [r2+2*r3] ; read new row
    punpcklbw m5, m7
    pmullw m0, [r6+0]
    paddsw m6, m0
    mova m0, m1
    mova m1, m2
    pmullw m2, [r6+32]
    paddsw m6, m2
    mova m2, m3
    pmullw m3, [r6+48]
    paddsw m6, m3
    mova m3, m4
    mova m4, m5
    pmullw m5, [r6+80]
    paddsw m6, m5
; round/clip/store
    paddsw m6, [pw_64]
    psraw m6, 7
    packuswb m6, m7
    movh [r0], m6
; go to next line
    add r0, r1
    add r2, r3
    dec r4d ; next row
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_V mmxext, 4, 0
INIT_XMM
FILTER_V sse2, 8, 8
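
; VP8's bilinear weights are (8-frac, frac) with (x+4)>>3 rounding. The code
; below gets that rounding from psraw 2 followed by pavgw against zero:
; ((x>>2)+1)>>1 == (x+4)>>3 for the non-negative sums produced here.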
%macro FILTER_BILINEAR 3
cglobal put_vp8_bilinear%2_v_%1, 7,7,%3
    mov r5d, 8*16
    shl r6d, 4
    sub r5d, r6d
%ifdef PIC
    lea r11, [bilinear_filter_vw_m]
%endif
    pxor m6, m6
    mova m4, [bilinear_filter_vw+r5-16]
    mova m5, [bilinear_filter_vw+r6-16]
.nextrow
    movh m0, [r2+r3*0]
    movh m1, [r2+r3*1]
    movh m3, [r2+r3*2]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m3, m6
    mova m2, m1
    pmullw m0, m4
    pmullw m1, m5
    pmullw m2, m4
    pmullw m3, m5
    paddsw m0, m1
    paddsw m2, m3
    psraw m0, 2
    psraw m2, 2
    pavgw m0, m6
    pavgw m2, m6
%ifidn %1, mmxext
    packuswb m0, m0
    packuswb m2, m2
    movh [r0+r1*0], m0
    movh [r0+r1*1], m2
%else
    packuswb m0, m2
    movh [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif
    lea r0, [r0+r1*2]
    lea r2, [r2+r3*2]
    sub r4d, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%2_h_%1, 7,7,%3
    mov r6d, 8*16
    shl r5d, 4
    sub r6d, r5d
%ifdef PIC
    lea r11, [bilinear_filter_vw_m]
%endif
    pxor m6, m6
    mova m4, [bilinear_filter_vw+r6-16]
    mova m5, [bilinear_filter_vw+r5-16]
.nextrow
    movh m0, [r2+r3*0+0]
    movh m1, [r2+r3*0+1]
    movh m2, [r2+r3*1+0]
    movh m3, [r2+r3*1+1]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m2, m6
    punpcklbw m3, m6
    pmullw m0, m4
    pmullw m1, m5
    pmullw m2, m4
    pmullw m3, m5
    paddsw m0, m1
    paddsw m2, m3
    psraw m0, 2
    psraw m2, 2
    pavgw m0, m6
    pavgw m2, m6
%ifidn %1, mmxext
    packuswb m0, m0
    packuswb m2, m2
    movh [r0+r1*0], m0
    movh [r0+r1*1], m2
%else
    packuswb m0, m2
    movh [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif
    lea r0, [r0+r1*2]
    lea r2, [r2+r3*2]
    sub r4d, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_BILINEAR mmxext, 4, 0
INIT_XMM
FILTER_BILINEAR sse2, 8, 7

%macro FILTER_BILINEAR_SSSE3 1
cglobal put_vp8_bilinear%1_v_ssse3, 7,7
    shl r6d, 4
%ifdef PIC
    lea r11, [bilinear_filter_vb_m]
%endif
    pxor m4, m4
    mova m3, [bilinear_filter_vb+r6-16]
.nextrow
    movh m0, [r2+r3*0]
    movh m1, [r2+r3*1]
    movh m2, [r2+r3*2]
    punpcklbw m0, m1
    punpcklbw m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw m0, 2
    psraw m1, 2
    pavgw m0, m4
    pavgw m1, m4
%if mmsize==8
    packuswb m0, m0
    packuswb m1, m1
    movh [r0+r1*0], m0
    movh [r0+r1*1], m1
%else
    packuswb m0, m1
    movh [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif
    lea r0, [r0+r1*2]
    lea r2, [r2+r3*2]
    sub r4d, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%1_h_ssse3, 7,7
    shl r5d, 4
%ifdef PIC
    lea r11, [bilinear_filter_vb_m]
%endif
    pxor m4, m4
    mova m2, [filter_h2_shuf]
    mova m3, [bilinear_filter_vb+r5-16]
.nextrow
    movu m0, [r2+r3*0]
    movu m1, [r2+r3*1]
    pshufb m0, m2
    pshufb m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw m0, 2
    psraw m1, 2
    pavgw m0, m4
    pavgw m1, m4
%if mmsize==8
    packuswb m0, m0
    packuswb m1, m1
    movh [r0+r1*0], m0
    movh [r0+r1*1], m1
%else
    packuswb m0, m1
    movh [r0+r1*0], m0
    movhps [r0+r1*1], m0
%endif
    lea r0, [r0+r1*2]
    lea r2, [r2+r3*2]
    sub r4d, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX
FILTER_BILINEAR_SSSE3 4
INIT_XMM
FILTER_BILINEAR_SSSE3 8

cglobal put_vp8_pixels8_mmx, 5,5
.nextrow:
    movq mm0, [r2+r3*0]
    movq mm1, [r2+r3*1]
    lea r2, [r2+r3*2]
    movq [r0+r1*0], mm0
    movq [r0+r1*1], mm1
    lea r0, [r0+r1*2]
    sub r4d, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_pixels16_mmx, 5,5
.nextrow:
    movq mm0, [r2+r3*0+0]
    movq mm1, [r2+r3*0+8]
    movq mm2, [r2+r3*1+0]
    movq mm3, [r2+r3*1+8]
    lea r2, [r2+r3*2]
    movq [r0+r1*0+0], mm0
    movq [r0+r1*0+8], mm1
    movq [r0+r1*1+0], mm2
    movq [r0+r1*1+8], mm3
    lea r0, [r0+r1*2]
    sub r4d, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_pixels16_sse, 5,5,2
.nextrow:
    movups xmm0, [r2+r3*0]
    movups xmm1, [r2+r3*1]
    lea r2, [r2+r3*2]
    movaps [r0+r1*0], xmm0
    movaps [r0+r1*1], xmm1
    lea r0, [r0+r1*2]
    sub r4d, 2
    jg .nextrow
    REP_RET

;-----------------------------------------------------------------------------
; void vp8_idct_dc_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------
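; in C terms (illustrative sketch): dc = (block[0] + 4) >> 3; block[0] = 0;
; then for each of the 4x4 pixels, dst[i] = clip_uint8(dst[i] + dc)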
%macro ADD_DC 4
    %4 m2, [r0+%3]
    %4 m3, [r0+r2+%3]
    %4 m4, [r1+%3]
    %4 m5, [r1+r2+%3]
    paddusb m2, %1
    paddusb m3, %1
    paddusb m4, %1
    paddusb m5, %1
    psubusb m2, %2
    psubusb m3, %2
    psubusb m4, %2
    psubusb m5, %2
    %4 [r0+%3], m2
    %4 [r0+r2+%3], m3
    %4 [r1+%3], m4
    %4 [r1+r2+%3], m5
%endmacro
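; in the macro above, %1 holds max(dc, 0) and %2 holds max(-dc, 0) broadcast
; across the register, so a possibly-negative dc is applied using only
; unsigned saturating arithmetic; %3 is a byte offset, %4 the load/store op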
INIT_MMX
cglobal vp8_idct_dc_add_mmx, 3, 3
; load data
    movd m0, [r1]
; calculate DC
    paddw m0, [pw_4]
    pxor m1, m1
    psraw m0, 3
    movd [r1], m1
    psubw m1, m0
    packuswb m0, m0
    packuswb m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1
    punpcklwd m0, m0
    punpcklwd m1, m1
; add DC
    lea r1, [r0+r2*2]
    ADD_DC m0, m1, 0, movh
    RET

INIT_XMM
cglobal vp8_idct_dc_add_sse4, 3, 3, 6
; load data
    movd m0, [r1]
    pxor m1, m1
; calculate DC
    paddw m0, [pw_4]
    movd [r1], m1
    lea r1, [r0+r2*2]
    movd m2, [r0]
    movd m3, [r0+r2]
    movd m4, [r1]
    movd m5, [r1+r2]
    psraw m0, 3
    pshuflw m0, m0, 0
    punpcklqdq m0, m0
    punpckldq m2, m3
    punpckldq m4, m5
    punpcklbw m2, m1
    punpcklbw m4, m1
    paddw m2, m0
    paddw m4, m0
    packuswb m2, m4
    movd [r0], m2
    pextrd [r0+r2], m2, 1
    pextrd [r1], m2, 2
    pextrd [r1+r2], m2, 3
    RET

;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------
INIT_MMX
cglobal vp8_idct_dc_add4y_mmx, 3, 3
; load data
    movd m0, [r1+32*0] ; A
    movd m1, [r1+32*2] ; C
    punpcklwd m0, [r1+32*1] ; A B
    punpcklwd m1, [r1+32*3] ; C D
    punpckldq m0, m1 ; A B C D
    pxor m6, m6
; calculate DC
    paddw m0, [pw_4]
    movd [r1+32*0], m6
    movd [r1+32*1], m6
    movd [r1+32*2], m6
    movd [r1+32*3], m6
    psraw m0, 3
    psubw m6, m0
    packuswb m0, m0
    packuswb m6, m6
    punpcklbw m0, m0 ; AABBCCDD
    punpcklbw m6, m6 ; AABBCCDD
    movq m1, m0
    movq m7, m6
    punpcklbw m0, m0 ; AAAABBBB
    punpckhbw m1, m1 ; CCCCDDDD
    punpcklbw m6, m6 ; AAAABBBB
    punpckhbw m7, m7 ; CCCCDDDD
; add DC
    lea r1, [r0+r2*2]
    ADD_DC m0, m6, 0, mova
    ADD_DC m1, m7, 8, mova
    RET

INIT_XMM
cglobal vp8_idct_dc_add4y_sse2, 3, 3, 6
; load data
    movd m0, [r1+32*0] ; A
    movd m1, [r1+32*2] ; C
    punpcklwd m0, [r1+32*1] ; A B
    punpcklwd m1, [r1+32*3] ; C D
    punpckldq m0, m1 ; A B C D
    pxor m1, m1
; calculate DC
    paddw m0, [pw_4]
    movd [r1+32*0], m1
    movd [r1+32*1], m1
    movd [r1+32*2], m1
    movd [r1+32*3], m1
    psraw m0, 3
    psubw m1, m0
    packuswb m0, m0
    packuswb m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1
; add DC
    lea r1, [r0+r2*2]
    ADD_DC m0, m1, 0, mova
    RET

;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------
INIT_MMX
cglobal vp8_idct_dc_add4uv_mmx, 3, 3
; load data
    movd m0, [r1+32*0] ; A
    movd m1, [r1+32*2] ; C
    punpcklwd m0, [r1+32*1] ; A B
    punpcklwd m1, [r1+32*3] ; C D
    punpckldq m0, m1 ; A B C D
    pxor m6, m6
; calculate DC
    paddw m0, [pw_4]
    movd [r1+32*0], m6
    movd [r1+32*1], m6
    movd [r1+32*2], m6
    movd [r1+32*3], m6
    psraw m0, 3
    psubw m6, m0
    packuswb m0, m0
    packuswb m6, m6
    punpcklbw m0, m0 ; AABBCCDD
    punpcklbw m6, m6 ; AABBCCDD
    movq m1, m0
    movq m7, m6
    punpcklbw m0, m0 ; AAAABBBB
    punpckhbw m1, m1 ; CCCCDDDD
    punpcklbw m6, m6 ; AAAABBBB
    punpckhbw m7, m7 ; CCCCDDDD
; add DC
    lea r1, [r0+r2*2]
    ADD_DC m0, m6, 0, mova
    lea r0, [r0+r2*4]
    lea r1, [r1+r2*4]
    ADD_DC m1, m7, 0, mova
    RET

;-----------------------------------------------------------------------------
; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------
; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
; this macro assumes that m6/m7 have words for 20091/17734 loaded
%macro VP8_MULTIPLY_SUMSUB 4
    mova %3, %1
    mova %4, %2
    pmulhw %3, m6 ;20091(1)
    pmulhw %4, m6 ;20091(2)
    paddw %3, %1
    paddw %4, %2
    paddw %1, %1
    paddw %2, %2
    pmulhw %1, m7 ;35468(1)
    pmulhw %2, m7 ;35468(2)
    psubw %1, %4
    paddw %2, %3
%endmacro
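; 20091/65536 and 35468/65536 approximate sqrt(2)*cos(pi/8)-1 and
; sqrt(2)*sin(pi/8), VP8's exact IDCT constants. 35468 does not fit in a
; signed word, so pw_17734 holds half of it and the macro doubles the input
; (paddw %1, %1) before the pmulhw; the 20091 path adds the input back in
; after the pmulhw to get the full 1.30656 factor.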
; calculate x0=%1+%3; x1=%1-%3
; x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
    SUMSUB_BA m%3, m%1, m%5 ;t0, t1
    VP8_MULTIPLY_SUMSUB m%2, m%4, m%5,m%6 ;t2, t3
    SUMSUB_BA m%4, m%3, m%5 ;tmp0, tmp3
    SUMSUB_BA m%2, m%1, m%5 ;tmp1, tmp2
    SWAP %4, %1
    SWAP %4, %3
%endmacro
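; (the 2-D IDCT below runs this 1-D pass twice with a word transpose in
; between; the +4 added to m0 between the passes becomes the (x+4)>>3
; rounding once STORE_DIFFx2 shifts the final result right by 3)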
INIT_MMX
%macro VP8_IDCT_ADD 1
cglobal vp8_idct_add_%1, 3, 3
; load block data
    movq m0, [r1+ 0]
    movq m1, [r1+ 8]
    movq m2, [r1+16]
    movq m3, [r1+24]
    movq m6, [pw_20091]
    movq m7, [pw_17734]
%ifidn %1, sse
    xorps xmm0, xmm0
    movaps [r1+ 0], xmm0
    movaps [r1+16], xmm0
%else
    pxor m4, m4
    movq [r1+ 0], m4
    movq [r1+ 8], m4
    movq [r1+16], m4
    movq [r1+24], m4
%endif
; actual IDCT
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    paddw m0, [pw_4]
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W 0, 1, 2, 3, 4
; store
    pxor m4, m4
    lea r1, [r0+2*r2]
    STORE_DIFFx2 m0, m1, m6, m7, m4, 3, r0, r2
    STORE_DIFFx2 m2, m3, m6, m7, m4, 3, r1, r2
    RET
%endmacro

VP8_IDCT_ADD mmx
VP8_IDCT_ADD sse

;-----------------------------------------------------------------------------
; void vp8_luma_dc_wht_<opt>(DCTELEM block[4][4][16], DCTELEM dc[16])
;-----------------------------------------------------------------------------
%macro SCATTER_WHT 3
    movd r1d, m%1
    movd r2d, m%2
    mov [r0+2*16*(0+%3)], r1w
    mov [r0+2*16*(1+%3)], r2w
    shr r1d, 16
    shr r2d, 16
    psrlq m%1, 32
    psrlq m%2, 32
    mov [r0+2*16*(4+%3)], r1w
    mov [r0+2*16*(5+%3)], r2w
    movd r1d, m%1
    movd r2d, m%2
    mov [r0+2*16*(8+%3)], r1w
    mov [r0+2*16*(9+%3)], r2w
    shr r1d, 16
    shr r2d, 16
    mov [r0+2*16*(12+%3)], r1w
    mov [r0+2*16*(13+%3)], r2w
%endmacro
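; each 4x4 luma block occupies 16 DCTELEMs (32 bytes), hence the 2*16 scale;
; the macro above stores one transformed DC into the first coefficient of
; each of the 16 blocks, with %3 selecting which pair of columns is written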
%macro HADAMARD4_1D 4
    SUMSUB_BADC m%2, m%1, m%4, m%3
    SUMSUB_BADC m%4, m%2, m%3, m%1
    SWAP %1, %4, %3
%endmacro

%macro VP8_DC_WHT 1
cglobal vp8_luma_dc_wht_%1, 2,3
    movq m0, [r1]
    movq m1, [r1+8]
    movq m2, [r1+16]
    movq m3, [r1+24]
%ifidn %1, sse
    xorps xmm0, xmm0
    movaps [r1+ 0], xmm0
    movaps [r1+16], xmm0
%else
    pxor m4, m4
    movq [r1+ 0], m4
    movq [r1+ 8], m4
    movq [r1+16], m4
    movq [r1+24], m4
%endif
    HADAMARD4_1D 0, 1, 2, 3
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    paddw m0, [pw_3]
    HADAMARD4_1D 0, 1, 2, 3
    psraw m0, 3
    psraw m1, 3
    psraw m2, 3
    psraw m3, 3
    SCATTER_WHT 0, 1, 0
    SCATTER_WHT 2, 3, 2
    RET
%endmacro

INIT_MMX
VP8_DC_WHT mmx
VP8_DC_WHT sse

;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
;-----------------------------------------------------------------------------
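; in C terms (illustrative): the edge is filtered iff
;   2*abs(p0-q0) + abs(p1-q1)/2 <= flim,
; in which case a = 3*(q0-p0) + (p1-q1) is computed with signed saturation,
; f1 = (a+4)>>3 is subtracted from q0 and f2 = (a+3)>>3 is added to p0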
; macro called with 7 mm register indexes as argument, and 4 regular registers
;
; first 4 mm registers will carry the transposed pixel data
; the other three are scratchspace (one would be sufficient, but this allows
; for more spreading/pipelining and thus faster execution on OOE CPUs)
;
; first two regular registers are buf+4*stride and buf+5*stride
; third is -stride, fourth is +stride
%macro READ_8x4_INTERLEAVED 11
; interleave 8 (A-H) rows of 4 pixels each
    movd m%1, [%8+%10*4] ; A0-3
    movd m%5, [%9+%10*4] ; B0-3
    movd m%2, [%8+%10*2] ; C0-3
    movd m%6, [%8+%10] ; D0-3
    movd m%3, [%8] ; E0-3
    movd m%7, [%9] ; F0-3
    movd m%4, [%9+%11] ; G0-3
    punpcklbw m%1, m%5 ; A/B interleaved
    movd m%5, [%9+%11*2] ; H0-3
    punpcklbw m%2, m%6 ; C/D interleaved
    punpcklbw m%3, m%7 ; E/F interleaved
    punpcklbw m%4, m%5 ; G/H interleaved
%endmacro

; macro called with 7 mm register indexes as argument, and 5 regular registers
; first 11 mean the same as READ_8x4_INTERLEAVED above
; fifth regular register is scratchspace to reach the bottom 8 rows, it
; will be set to second regular register + 8*stride at the end
%macro READ_16x4_INTERLEAVED 12
; transpose 16 (A-P) rows of 4 pixels each
    lea %12, [r0+8*r2]
; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
    movd m%1, [%8+%10*4] ; A0-3
    movd m%3, [%12+%10*4] ; I0-3
    movd m%2, [%8+%10*2] ; C0-3
    movd m%4, [%12+%10*2] ; K0-3
    movd m%6, [%8+%10] ; D0-3
    movd m%5, [%12+%10] ; L0-3
    movd m%7, [%12] ; M0-3
    add %12, %11
    punpcklbw m%1, m%3 ; A/I
    movd m%3, [%8] ; E0-3
    punpcklbw m%2, m%4 ; C/K
    punpcklbw m%6, m%5 ; D/L
    punpcklbw m%3, m%7 ; E/M
    punpcklbw m%2, m%6 ; C/D/K/L interleaved
; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
    movd m%5, [%9+%10*4] ; B0-3
    movd m%4, [%12+%10*4] ; J0-3
    movd m%7, [%9] ; F0-3
    movd m%6, [%12] ; N0-3
    punpcklbw m%5, m%4 ; B/J
    punpcklbw m%7, m%6 ; F/N
    punpcklbw m%1, m%5 ; A/B/I/J interleaved
    punpcklbw m%3, m%7 ; E/F/M/N interleaved
    movd m%4, [%9+%11] ; G0-3
    movd m%6, [%12+%11] ; O0-3
    movd m%5, [%9+%11*2] ; H0-3
    movd m%7, [%12+%11*2] ; P0-3
    punpcklbw m%4, m%6 ; G/O
    punpcklbw m%5, m%7 ; H/P
    punpcklbw m%4, m%5 ; G/H/O/P interleaved
%endmacro

; write 4 mm registers of 2 dwords each
; first four arguments are mm register indexes containing source data
; last four are registers containing buf+4*stride, buf+5*stride,
; -stride and +stride
%macro WRITE_4x2D 8
; write out (2 dwords per register)
    movd [%5+%7*4], m%1
    movd [%5+%7*2], m%2
    movd [%5], m%3
    movd [%6+%8], m%4
    punpckhdq m%1, m%1
    punpckhdq m%2, m%2
    punpckhdq m%3, m%3
    punpckhdq m%4, m%4
    movd [%6+%7*4], m%1
    movd [%5+%7], m%2
    movd [%6], m%3
    movd [%6+%8*2], m%4
%endmacro

; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_4x2D, but with an extra register, so that the 5 regular
; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
; we add 1*stride to the third regular register in the process
; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
; same memory region), or 8 if they cover two separate buffers (third one points to
; a different memory region than the first two), allowing for more optimal code for
; the 16-width case
%macro WRITE_4x4D 10
; write out (4 dwords per register), start with dwords zero
    movd [%5+%8*4], m%1
    movd [%5], m%2
    movd [%7+%8*4], m%3
    movd [%7], m%4
; store dwords 1
    psrldq m%1, 4
    psrldq m%2, 4
    psrldq m%3, 4
    psrldq m%4, 4
    movd [%6+%8*4], m%1
    movd [%6], m%2
%if %10 == 16
    movd [%6+%9*4], m%3
%endif
    movd [%7+%9], m%4
; write dwords 2
    psrldq m%1, 4
    psrldq m%2, 4
%if %10 == 8
    movd [%5+%8*2], m%1
    movd %5d, m%3
%endif
    psrldq m%3, 4
    psrldq m%4, 4
%if %10 == 16
    movd [%5+%8*2], m%1
%endif
    movd [%6+%9], m%2
    movd [%7+%8*2], m%3
    movd [%7+%9*2], m%4
    add %7, %9
; store dwords 3
    psrldq m%1, 4
    psrldq m%2, 4
    psrldq m%3, 4
    psrldq m%4, 4
%if %10 == 8
    mov [%7+%8*4], %5d
    movd [%6+%8*2], m%1
%else
    movd [%5+%8], m%1
%endif
    movd [%6+%9*2], m%2
    movd [%7+%8*2], m%3
    movd [%7+%9*2], m%4
%endmacro

; write 4 or 8 words in the mmx/xmm registers as 8 lines
; 1 and 2 are the registers to write, this can be the same (for SSE2)
; for pre-SSE4:
; 3 is a general-purpose register that we will clobber
; for SSE4:
; 3 is a pointer to the destination's 5th line
; 4 is a pointer to the destination's 4th line
; 5/6 is -stride and +stride
%macro WRITE_2x4W 6
    movd %3d, %1
    punpckhdq %1, %1
    mov [%4+%5*4], %3w
    shr %3, 16
    add %4, %6
    mov [%4+%5*4], %3w
    movd %3d, %1
    add %4, %5
    mov [%4+%5*2], %3w
    shr %3, 16
    mov [%4+%5 ], %3w
    movd %3d, %2
    punpckhdq %2, %2
    mov [%4 ], %3w
    shr %3, 16
    mov [%4+%6 ], %3w
    movd %3d, %2
    add %4, %6
    mov [%4+%6 ], %3w
    shr %3, 16
    mov [%4+%6*2], %3w
    add %4, %5
%endmacro

%macro WRITE_8W_SSE2 5
    movd %2d, %1
    psrldq %1, 4
    mov [%3+%4*4], %2w
    shr %2, 16
    add %3, %5
    mov [%3+%4*4], %2w
    movd %2d, %1
    psrldq %1, 4
    add %3, %4
    mov [%3+%4*2], %2w
    shr %2, 16
    mov [%3+%4 ], %2w
    movd %2d, %1
    psrldq %1, 4
    mov [%3 ], %2w
    shr %2, 16
    mov [%3+%5 ], %2w
    movd %2d, %1
    add %3, %5
    mov [%3+%5 ], %2w
    shr %2, 16
    mov [%3+%5*2], %2w
%endmacro

%macro WRITE_8W_SSE4 5
    pextrw [%3+%4*4], %1, 0
    pextrw [%2+%4*4], %1, 1
    pextrw [%3+%4*2], %1, 2
    pextrw [%3+%4 ], %1, 3
    pextrw [%3 ], %1, 4
    pextrw [%2 ], %1, 5
    pextrw [%2+%5 ], %1, 6
    pextrw [%2+%5*2], %1, 7
%endmacro

%macro SPLATB_REG_MMX 2-3
    movd %1, %2d
    punpcklbw %1, %1
    punpcklwd %1, %1
    punpckldq %1, %1
%endmacro

%macro SPLATB_REG_MMXEXT 2-3
    movd %1, %2d
    punpcklbw %1, %1
    pshufw %1, %1, 0x0
%endmacro

%macro SPLATB_REG_SSE2 2-3
    movd %1, %2d
    punpcklbw %1, %1
    pshuflw %1, %1, 0x0
    punpcklqdq %1, %1
%endmacro

%macro SPLATB_REG_SSSE3 3
    movd %1, %2d
    pshufb %1, %3
%endmacro
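; the SPLATB_REG variants above broadcast the low byte of a GPR into every
; byte of an mm/xmm register; the SSSE3 version expects %3 to be an all-zero
; register, since a zero pshufb mask replicates byte 0 (hence the pxor at
; the callers)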
%macro SIMPLE_LOOPFILTER 4
cglobal vp8_%2_loop_filter_simple_%1, 3, %3, %4
%if mmsize == 8 ; mmx/mmxext
    mov r3, 2
%endif
%ifnidn %1, sse2
%if mmsize == 16
    pxor m0, m0
%endif
%endif
    SPLATB_REG m7, r2, m0 ; splat "flim" into register
; set up indexes to address 4 rows
    mov r2, r1
    neg r1
%ifidn %2, h
    lea r0, [r0+4*r2-2]
%endif
%if mmsize == 8 ; mmx / mmxext
.next8px
%endif
%ifidn %2, v
; read 4 half/full rows of pixels
    mova m0, [r0+r1*2] ; p1
    mova m1, [r0+r1] ; p0
    mova m2, [r0] ; q0
    mova m3, [r0+r2] ; q1
%else ; h
    lea r4, [r0+r2]
%if mmsize == 8 ; mmx/mmxext
    READ_8x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2
%else ; sse2
    READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, r0, r4, r1, r2, r3
%endif
    TRANSPOSE4x4W 0, 1, 2, 3, 4
%endif
; simple_limit
    mova m5, m2 ; m5=backup of q0
    mova m6, m1 ; m6=backup of p0
    psubusb m1, m2 ; p0-q0
    psubusb m2, m6 ; q0-p0
    por m1, m2 ; FFABS(p0-q0)
    paddusb m1, m1 ; m1=FFABS(p0-q0)*2
    mova m4, m3
    mova m2, m0
    psubusb m3, m0 ; q1-p1
    psubusb m0, m4 ; p1-q1
    por m3, m0 ; FFABS(p1-q1)
    mova m0, [pb_80]
    pxor m2, m0
    pxor m4, m0
    psubsb m2, m4 ; m2=p1-q1 (signed) backup for below
    pand m3, [pb_FE]
    psrlq m3, 1 ; m3=FFABS(p1-q1)/2, this can be used signed
    paddusb m3, m1
    psubusb m3, m7
    pxor m1, m1
    pcmpeqb m3, m1 ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)
; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
    mova m4, m5
    pxor m5, m0
    pxor m0, m6
    psubsb m5, m0 ; q0-p0 (signed)
    paddsb m2, m5
    paddsb m2, m5
    paddsb m2, m5 ; a=(p1-q1) + 3*(q0-p0)
    pand m2, m3 ; apply filter mask (m3)
    mova m3, [pb_F8]
    mova m1, m2
    paddsb m2, [pb_4] ; f1<<3=a+4
    paddsb m1, [pb_3] ; f2<<3=a+3
    pand m2, m3
    pand m1, m3 ; cache f2<<3
    pxor m0, m0
    pxor m3, m3
    pcmpgtb m0, m2 ; which values are <0?
    psubb m3, m2 ; -f1<<3
    psrlq m2, 3 ; +f1
    psrlq m3, 3 ; -f1
    pand m3, m0
    pandn m0, m2
    psubusb m4, m0
    paddusb m4, m3 ; q0-f1
    pxor m0, m0
    pxor m3, m3
    pcmpgtb m0, m1 ; which values are <0?
    psubb m3, m1 ; -f2<<3
    psrlq m1, 3 ; +f2
    psrlq m3, 3 ; -f2
    pand m3, m0
    pandn m0, m1
    paddusb m6, m0
    psubusb m6, m3 ; p0+f2
; store
%ifidn %2, v
    mova [r0], m4
    mova [r0+r1], m6
%else ; h
    inc r0
    SBUTTERFLY bw, 6, 4, 0
%if mmsize == 16 ; sse2
%ifidn %1, sse4
    inc r4
%endif
    WRITE_8W m6, r4, r0, r1, r2
    lea r4, [r3+r1+1]
%ifidn %1, sse4
    inc r3
%endif
    WRITE_8W m4, r3, r4, r1, r2
%else ; mmx/mmxext
    WRITE_2x4W m6, m4, r4, r0, r1, r2
%endif
%endif
%if mmsize == 8 ; mmx/mmxext
; next 8 pixels
%ifidn %2, v
    add r0, 8 ; advance 8 cols = pixels
%else ; h
    lea r0, [r0+r2*8-1] ; advance 8 rows = lines
%endif
    dec r3
    jg .next8px
    REP_RET
%else ; sse2
    RET
%endif
%endmacro

INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
SIMPLE_LOOPFILTER mmx, v, 4, 0
SIMPLE_LOOPFILTER mmx, h, 5, 0
%define SPLATB_REG SPLATB_REG_MMXEXT
SIMPLE_LOOPFILTER mmxext, v, 4, 0
SIMPLE_LOOPFILTER mmxext, h, 5, 0
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
%define WRITE_8W WRITE_8W_SSE2
SIMPLE_LOOPFILTER sse2, v, 3, 8
SIMPLE_LOOPFILTER sse2, h, 5, 8
%define SPLATB_REG SPLATB_REG_SSSE3
SIMPLE_LOOPFILTER ssse3, v, 3, 8
SIMPLE_LOOPFILTER ssse3, h, 5, 8
%define WRITE_8W WRITE_8W_SSE4
SIMPLE_LOOPFILTER sse4, h, 5, 8

;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                            int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
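; flimE bounds the edge step itself (the same test as the simple filter),
; flimI bounds the interior differences abs(p3-p2) .. abs(q1-q0), and hev_thr
; is the high-edge-variance threshold: p1/q1 are only adjusted where both
; abs(p1-p0) and abs(q1-q0) stay at or below it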
%macro INNER_LOOPFILTER 5
%if %4 == 8 ; chroma
cglobal vp8_%2_loop_filter8uv_inner_%1, 6, %3, %5
%define dst8_reg r1
%define mstride_reg r2
%define E_reg r3
%define I_reg r4
%define hev_thr_reg r5
%else ; luma
cglobal vp8_%2_loop_filter16y_inner_%1, 5, %3, %5
%define mstride_reg r1
%define E_reg r2
%define I_reg r3
%define hev_thr_reg r4
%ifdef m8 ; x86-64, sse2
%define dst8_reg r4
%elif mmsize == 16 ; x86-32, sse2
%define dst8_reg r5
%else ; x86-32, mmx/mmxext
%define cnt_reg r5
%endif
%endif
%define dst_reg r0
%define stride_reg E_reg
%define dst2_reg I_reg
%ifndef m8
%define stack_reg hev_thr_reg
%endif
%ifnidn %1, sse2
%if mmsize == 16
    pxor m7, m7
%endif
%endif
%ifndef m8 ; mmx/mmxext or sse2 on x86-32
; splat function arguments
    SPLATB_REG m0, E_reg, m7 ; E
    SPLATB_REG m1, I_reg, m7 ; I
    SPLATB_REG m2, hev_thr_reg, m7 ; hev_thresh
; align stack
    mov stack_reg, rsp ; backup stack pointer
    and rsp, ~(mmsize-1) ; align stack
%ifidn %2, v
    sub rsp, mmsize * 4 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
                        ;               [3]=hev() result
%else ; h
    sub rsp, mmsize * 5 ; extra storage space for transposes
%endif
%define flim_E [rsp]
%define flim_I [rsp+mmsize]
%define hev_thr [rsp+mmsize*2]
%define mask_res [rsp+mmsize*3]
%define p0backup [rsp+mmsize*3]
%define q0backup [rsp+mmsize*4]
    mova flim_E, m0
    mova flim_I, m1
    mova hev_thr, m2
%else ; sse2 on x86-64
%define flim_E m9
%define flim_I m10
%define hev_thr m11
%define mask_res m12
%define p0backup m12
%define q0backup m8
; splat function arguments
    SPLATB_REG flim_E, E_reg, m7 ; E
    SPLATB_REG flim_I, I_reg, m7 ; I
    SPLATB_REG hev_thr, hev_thr_reg, m7 ; hev_thresh
%endif
%if mmsize == 8 && %4 == 16 ; mmx/mmxext
    mov cnt_reg, 2
%endif
    mov stride_reg, mstride_reg
    neg mstride_reg
%ifidn %2, h
    lea dst_reg, [dst_reg + stride_reg*4-4]
%if %4 == 8
    lea dst8_reg, [dst8_reg+ stride_reg*4-4]
%endif
%endif
%if mmsize == 8
.next8px
%endif
; read
    lea dst2_reg, [dst_reg + stride_reg]
%ifidn %2, v
%if %4 == 8 && mmsize == 16
%define movrow movh
%else
%define movrow mova
%endif
    movrow m0, [dst_reg +mstride_reg*4] ; p3
    movrow m1, [dst2_reg+mstride_reg*4] ; p2
    movrow m2, [dst_reg +mstride_reg*2] ; p1
    movrow m5, [dst2_reg] ; q1
    movrow m6, [dst2_reg+ stride_reg] ; q2
    movrow m7, [dst2_reg+ stride_reg*2] ; q3
%if mmsize == 16 && %4 == 8
    movhps m0, [dst8_reg+mstride_reg*4]
    movhps m2, [dst8_reg+mstride_reg*2]
    add dst8_reg, stride_reg
    movhps m1, [dst8_reg+mstride_reg*4]
    movhps m5, [dst8_reg]
    movhps m6, [dst8_reg+ stride_reg]
    movhps m7, [dst8_reg+ stride_reg*2]
    add dst8_reg, mstride_reg
%endif
%elif mmsize == 8 ; mmx/mmxext (h)
; read 8 rows of 8px each
    movu m0, [dst_reg +mstride_reg*4]
    movu m1, [dst2_reg+mstride_reg*4]
    movu m2, [dst_reg +mstride_reg*2]
    movu m3, [dst_reg +mstride_reg]
    movu m4, [dst_reg]
    movu m5, [dst2_reg]
    movu m6, [dst2_reg+ stride_reg]
; 8x8 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
    mova q0backup, m1
    movu m7, [dst2_reg+ stride_reg*2]
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1 ; p3/p2
    SBUTTERFLY dq, 2, 6, 1 ; q0/q1
    SBUTTERFLY dq, 3, 7, 1 ; q2/q3
    mova m1, q0backup
    mova q0backup, m2 ; store q0
    SBUTTERFLY dq, 1, 5, 2 ; p1/p0
    mova p0backup, m5 ; store p0
    SWAP 1, 4
    SWAP 2, 4
    SWAP 6, 3
    SWAP 5, 3
%else ; sse2 (h)
%if %4 == 16
    lea dst8_reg, [dst_reg + stride_reg*8]
%endif
; read 16 rows of 8px each, interleave
    movh m0, [dst_reg +mstride_reg*4]
    movh m1, [dst8_reg+mstride_reg*4]
    movh m2, [dst_reg +mstride_reg*2]
    movh m5, [dst8_reg+mstride_reg*2]
    movh m3, [dst_reg +mstride_reg]
    movh m6, [dst8_reg+mstride_reg]
    movh m4, [dst_reg]
    movh m7, [dst8_reg]
    punpcklbw m0, m1 ; A/I
    punpcklbw m2, m5 ; C/K
    punpcklbw m3, m6 ; D/L
    punpcklbw m4, m7 ; E/M
    add dst8_reg, stride_reg
    movh m1, [dst2_reg+mstride_reg*4]
    movh m6, [dst8_reg+mstride_reg*4]
    movh m5, [dst2_reg]
    movh m7, [dst8_reg]
    punpcklbw m1, m6 ; B/J
    punpcklbw m5, m7 ; F/N
    movh m6, [dst2_reg+ stride_reg]
    movh m7, [dst8_reg+ stride_reg]
    punpcklbw m6, m7 ; G/O
; 8x16 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
%ifdef m8
    SWAP 1, 8
%else
    mova q0backup, m1
%endif
    movh m7, [dst2_reg+ stride_reg*2]
    movh m1, [dst8_reg+ stride_reg*2]
    punpcklbw m7, m1 ; H/P
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1 ; p3/p2
    SBUTTERFLY dq, 2, 6, 1 ; q0/q1
    SBUTTERFLY dq, 3, 7, 1 ; q2/q3
%ifdef m8
    SWAP 1, 8
    SWAP 2, 8
%else
    mova m1, q0backup
    mova q0backup, m2 ; store q0
%endif
    SBUTTERFLY dq, 1, 5, 2 ; p1/p0
%ifdef m12
    SWAP 5, 12
%else
    mova p0backup, m5 ; store p0
%endif
    SWAP 1, 4
    SWAP 2, 4
    SWAP 6, 3
    SWAP 5, 3
%endif
; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
    mova m4, m1
    SWAP 4, 1
    psubusb m4, m0 ; p2-p3
    psubusb m0, m1 ; p3-p2
    por m0, m4 ; abs(p3-p2)
    mova m4, m2
    SWAP 4, 2
    psubusb m4, m1 ; p1-p2
    psubusb m1, m2 ; p2-p1
    por m1, m4 ; abs(p2-p1)
    mova m4, m6
    SWAP 4, 6
    psubusb m4, m7 ; q2-q3
    psubusb m7, m6 ; q3-q2
    por m7, m4 ; abs(q3-q2)
    mova m4, m5
    SWAP 4, 5
    psubusb m4, m6 ; q1-q2
    psubusb m6, m5 ; q2-q1
    por m6, m4 ; abs(q2-q1)
%ifidn %1, mmx
    mova m4, flim_I
    pxor m3, m3
    psubusb m0, m4
    psubusb m1, m4
    psubusb m7, m4
    psubusb m6, m4
    pcmpeqb m0, m3 ; abs(p3-p2) <= I
    pcmpeqb m1, m3 ; abs(p2-p1) <= I
    pcmpeqb m7, m3 ; abs(q3-q2) <= I
    pcmpeqb m6, m3 ; abs(q2-q1) <= I
    pand m0, m1
    pand m7, m6
    pand m0, m7
%else ; mmxext/sse2
    pmaxub m0, m1
    pmaxub m6, m7
    pmaxub m0, m6
%endif
; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP 7, 3 ; now m7 is zero
%ifidn %2, v
    movrow m3, [dst_reg +mstride_reg] ; p0
%if mmsize == 16 && %4 == 8
    movhps m3, [dst8_reg+mstride_reg]
%endif
%elifdef m12
    SWAP 3, 12
%else
    mova m3, p0backup
%endif
    mova m1, m2
    SWAP 1, 2
    mova m6, m3
    SWAP 3, 6
    psubusb m1, m3 ; p1-p0
    psubusb m6, m2 ; p0-p1
    por m1, m6 ; abs(p1-p0)
%ifidn %1, mmx
    mova m6, m1
    psubusb m1, m4
    psubusb m6, hev_thr
    pcmpeqb m1, m7 ; abs(p1-p0) <= I
    pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
    pand m0, m1
    mova mask_res, m6
%else ; mmxext/sse2
    pmaxub m0, m1 ; max_I
    SWAP 1, 4 ; max_hev_thresh
%endif
    SWAP 6, 4 ; now m6 is I
%ifidn %2, v
    movrow m4, [dst_reg] ; q0
%if mmsize == 16 && %4 == 8
    movhps m4, [dst8_reg]
%endif
%elifdef m8
    SWAP 4, 8
%else
    mova m4, q0backup
%endif
    mova m1, m4
    SWAP 1, 4
    mova m7, m5
    SWAP 7, 5
    psubusb m1, m5 ; q0-q1
    psubusb m7, m4 ; q1-q0
    por m1, m7 ; abs(q1-q0)
%ifidn %1, mmx
    mova m7, m1
    psubusb m1, m6
    psubusb m7, hev_thr
    pxor m6, m6
    pcmpeqb m1, m6 ; abs(q1-q0) <= I
    pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
    mova m6, mask_res
    pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
    pand m6, m7
%else ; mmxext/sse2
    pxor m7, m7
    pmaxub m0, m1
    pmaxub m6, m1
    psubusb m0, flim_I
    psubusb m6, hev_thr
    pcmpeqb m0, m7 ; max(abs(..)) <= I
    pcmpeqb m6, m7 ; !(max(abs..) > thresh)
%endif
%ifdef m12
    SWAP 6, 12
%else
    mova mask_res, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
%endif
; simple_limit
    mova m1, m3
    SWAP 1, 3
    mova m6, m4 ; keep copies of p0/q0 around for later use
    SWAP 6, 4
    psubusb m1, m4 ; p0-q0
    psubusb m6, m3 ; q0-p0
    por m1, m6 ; abs(q0-p0)
    paddusb m1, m1 ; m1=2*abs(q0-p0)
    mova m7, m2
    SWAP 7, 2
    mova m6, m5
    SWAP 6, 5
    psubusb m7, m5 ; p1-q1
    psubusb m6, m2 ; q1-p1
    por m7, m6 ; abs(q1-p1)
    pxor m6, m6
    pand m7, [pb_FE]
    psrlq m7, 1 ; abs(q1-p1)/2
    paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
    psubusb m7, flim_E
    pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand m0, m7 ; normal_limit result
; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
    mova m8, [pb_80]
%define pb_80_var m8
%else ; x86-32 or mmx/mmxext
%define pb_80_var [pb_80]
%endif
    mova m1, m4
    mova m7, m3
    pxor m1, pb_80_var
    pxor m7, pb_80_var
    psubsb m1, m7 ; (signed) q0-p0
    mova m6, m2
    mova m7, m5
    pxor m6, pb_80_var
    pxor m7, pb_80_var
    psubsb m6, m7 ; (signed) p1-q1
    mova m7, mask_res
    pandn m7, m6
    paddsb m7, m1
    paddsb m7, m1
    paddsb m7, m1 ; 3*(q0-p0)+is4tap?(p1-q1)
    pand m7, m0
    mova m1, [pb_F8]
    mova m6, m7
    paddsb m7, [pb_3]
    paddsb m6, [pb_4]
    pand m7, m1
    pand m6, m1
    pxor m1, m1
    pxor m0, m0
    pcmpgtb m1, m7
    psubb m0, m7
    psrlq m7, 3 ; +f2
    psrlq m0, 3 ; -f2
    pand m0, m1
    pandn m1, m7
    psubusb m3, m0
    paddusb m3, m1 ; p0+f2
    pxor m1, m1
    pxor m0, m0
    pcmpgtb m0, m6
    psubb m1, m6
    psrlq m6, 3 ; +f1
    psrlq m1, 3 ; -f1
    pand m1, m0
    pandn m0, m6
    psubusb m4, m0
    paddusb m4, m1 ; q0-f1
%ifdef m12
    SWAP 6, 12
%else
    mova m6, mask_res
%endif
%ifidn %1, mmx
    mova m7, [pb_1]
%else ; mmxext/sse2
    pxor m7, m7
%endif
    pand m0, m6
    pand m1, m6
%ifidn %1, mmx
    paddusb m0, m7
    pand m1, [pb_FE]
    pandn m7, m0
    psrlq m1, 1
    psrlq m7, 1
    SWAP 0, 7
%else ; mmxext/sse2
    psubusb m1, [pb_1]
    pavgb m0, m7 ; a
    pavgb m1, m7 ; -a
%endif
    psubusb m5, m0
    psubusb m2, m1
    paddusb m5, m1 ; q1-a
    paddusb m2, m0 ; p1+a
; store
%ifidn %2, v
    movrow [dst_reg +mstride_reg*2], m2
    movrow [dst_reg +mstride_reg ], m3
    movrow [dst_reg], m4
    movrow [dst_reg + stride_reg ], m5
%if mmsize == 16 && %4 == 8
    movhps [dst8_reg+mstride_reg*2], m2
    movhps [dst8_reg+mstride_reg ], m3
    movhps [dst8_reg], m4
    movhps [dst8_reg+ stride_reg ], m5
%endif
%else ; h
    add dst_reg, 2
    add dst2_reg, 2
; 4x8/16 transpose
    TRANSPOSE4x4B 2, 3, 4, 5, 6
%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D 2, 3, 4, 5, dst_reg, dst2_reg, mstride_reg, stride_reg
%else ; sse2 (h)
    lea dst8_reg, [dst8_reg+mstride_reg+2]
    WRITE_4x4D 2, 3, 4, 5, dst_reg, dst2_reg, dst8_reg, mstride_reg, stride_reg, %4
%endif
%endif
%if mmsize == 8
%if %4 == 8 ; chroma
%ifidn %2, h
    sub dst_reg, 2
%endif
    cmp dst_reg, dst8_reg
    mov dst_reg, dst8_reg
    jnz .next8px
%else
%ifidn %2, h
    lea dst_reg, [dst_reg + stride_reg*8-2]
%else ; v
    add dst_reg, 8
%endif
    dec cnt_reg
    jg .next8px
%endif
%endif
%ifndef m8 ; sse2 on x86-32 or mmx/mmxext
    mov rsp, stack_reg ; restore stack pointer
%endif
    RET
%endmacro

INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
INNER_LOOPFILTER mmx, v, 6, 16, 0
INNER_LOOPFILTER mmx, h, 6, 16, 0
INNER_LOOPFILTER mmx, v, 6, 8, 0
INNER_LOOPFILTER mmx, h, 6, 8, 0
%define SPLATB_REG SPLATB_REG_MMXEXT
INNER_LOOPFILTER mmxext, v, 6, 16, 0
INNER_LOOPFILTER mmxext, h, 6, 16, 0
INNER_LOOPFILTER mmxext, v, 6, 8, 0
INNER_LOOPFILTER mmxext, h, 6, 8, 0
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
INNER_LOOPFILTER sse2, v, 5, 16, 13
%ifdef m8
INNER_LOOPFILTER sse2, h, 5, 16, 13
%else
INNER_LOOPFILTER sse2, h, 6, 16, 13
%endif
INNER_LOOPFILTER sse2, v, 6, 8, 13
INNER_LOOPFILTER sse2, h, 6, 8, 13
%define SPLATB_REG SPLATB_REG_SSSE3
INNER_LOOPFILTER ssse3, v, 5, 16, 13
%ifdef m8
INNER_LOOPFILTER ssse3, h, 5, 16, 13
%else
INNER_LOOPFILTER ssse3, h, 6, 16, 13
%endif
INNER_LOOPFILTER ssse3, v, 6, 8, 13
INNER_LOOPFILTER ssse3, h, 6, 8, 13

;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                             int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
%macro MBEDGE_LOOPFILTER 5
%if %4 == 8 ; chroma
cglobal vp8_%2_loop_filter8uv_mbedge_%1, 6, %3, %5
%define dst8_reg    r1
%define mstride_reg r2
%define E_reg       r3
%define I_reg       r4
%define hev_thr_reg r5
%else ; luma
cglobal vp8_%2_loop_filter16y_mbedge_%1, 5, %3, %5
%define mstride_reg r1
%define E_reg       r2
%define I_reg       r3
%define hev_thr_reg r4
%ifdef m8 ; x86-64, sse2
%define dst8_reg    r4
%elif mmsize == 16 ; x86-32, sse2
%define dst8_reg    r5
%else ; x86-32, mmx/mmxext
%define cnt_reg     r5
%endif
%endif
%define dst_reg     r0
%define stride_reg  E_reg
%define dst2_reg    I_reg
%ifndef m8
%define stack_reg   hev_thr_reg
%endif

%define ssse3_or_higher 0
%ifnidn %1, sse2
%if mmsize == 16
%define ssse3_or_higher 1
%endif
%endif

%if ssse3_or_higher
    pxor         m7, m7
%endif
%ifndef m8 ; mmx/mmxext or sse2 on x86-32
    ; splat function arguments
    SPLATB_REG   m0, E_reg, m7        ; E
    SPLATB_REG   m1, I_reg, m7        ; I
    SPLATB_REG   m2, hev_thr_reg, m7  ; hev_thresh

    ; align stack
    mov          stack_reg, rsp       ; backup stack pointer
    and          rsp, ~(mmsize-1)     ; align stack
%if mmsize == 16
    sub          rsp, mmsize * 7
%else
    sub          rsp, mmsize * 8      ; stack layout: [0]=E, [1]=I, [2]=hev_thr
                                      ;               [3]=hev() result
                                      ;               [4]=filter tmp result
                                      ;               [5]/[6] = p2/q2 backup
                                      ;               [7]=lim_res sign result
%endif

%define flim_E   [rsp]
%define flim_I   [rsp+mmsize]
%define hev_thr  [rsp+mmsize*2]
%define mask_res [rsp+mmsize*3]
%define lim_res  [rsp+mmsize*4]
%define p0backup [rsp+mmsize*3]
%define q0backup [rsp+mmsize*4]
%define p2backup [rsp+mmsize*5]
%define q2backup [rsp+mmsize*6]
%if mmsize == 16
%define lim_sign [rsp]
%else
%define lim_sign [rsp+mmsize*7]
%endif
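
; Note how the x86-32 scratch slots are reused once their first
; occupant is dead: mask_res/lim_res alias p0backup/q0backup, and with
; mmsize == 16 lim_sign reuses the flim_E slot, since E has already
; been consumed by the simple_limit test by the time the sign mask is
; stored.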
    mova         flim_E, m0
    mova         flim_I, m1
    mova         hev_thr, m2
%else ; sse2 on x86-64
%define flim_E   m9
%define flim_I   m10
%define hev_thr  m11
%define mask_res m12
%define lim_res  m8
%define p0backup m12
%define q0backup m8
%define p2backup m13
%define q2backup m14
%define lim_sign m9

    ; splat function arguments
    SPLATB_REG   flim_E, E_reg, m7         ; E
    SPLATB_REG   flim_I, I_reg, m7         ; I
    SPLATB_REG   hev_thr, hev_thr_reg, m7  ; hev_thresh
%endif

%if mmsize == 8 && %4 == 16 ; mmx/mmxext
    mov          cnt_reg, 2
%endif
    mov          stride_reg, mstride_reg
    neg          mstride_reg
%ifidn %2, h
    lea          dst_reg, [dst_reg + stride_reg*4-4]
%if %4 == 8
    lea          dst8_reg, [dst8_reg+ stride_reg*4-4]
%endif
%endif

%if mmsize == 8
.next8px
%endif
    ; read
    lea          dst2_reg, [dst_reg + stride_reg]
%ifidn %2, v
%if %4 == 8 && mmsize == 16
%define movrow movh
%else
%define movrow mova
%endif
    movrow       m0, [dst_reg +mstride_reg*4] ; p3
    movrow       m1, [dst2_reg+mstride_reg*4] ; p2
    movrow       m2, [dst_reg +mstride_reg*2] ; p1
    movrow       m5, [dst2_reg]               ; q1
    movrow       m6, [dst2_reg+ stride_reg]   ; q2
    movrow       m7, [dst2_reg+ stride_reg*2] ; q3
%if mmsize == 16 && %4 == 8
    movhps       m0, [dst8_reg+mstride_reg*4]
    movhps       m2, [dst8_reg+mstride_reg*2]
    add          dst8_reg, stride_reg
    movhps       m1, [dst8_reg+mstride_reg*4]
    movhps       m5, [dst8_reg]
    movhps       m6, [dst8_reg+ stride_reg]
    movhps       m7, [dst8_reg+ stride_reg*2]
    add          dst8_reg, mstride_reg
%endif
%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu         m0, [dst_reg +mstride_reg*4]
    movu         m1, [dst2_reg+mstride_reg*4]
    movu         m2, [dst_reg +mstride_reg*2]
    movu         m3, [dst_reg +mstride_reg]
    movu         m4, [dst_reg]
    movu         m5, [dst2_reg]
    movu         m6, [dst2_reg+ stride_reg]

    ; 8x8 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
    mova         q0backup, m1
    movu         m7, [dst2_reg+ stride_reg*2]
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY   dq, 0, 4, 1     ; p3/p2
    SBUTTERFLY   dq, 2, 6, 1     ; q0/q1
    SBUTTERFLY   dq, 3, 7, 1     ; q2/q3
    mova         m1, q0backup
    mova         q0backup, m2    ; store q0
    SBUTTERFLY   dq, 1, 5, 2     ; p1/p0
    mova         p0backup, m5    ; store p0
    SWAP         1, 4
    SWAP         2, 4
    SWAP         6, 3
    SWAP         5, 3
%else ; sse2 (h)
%if %4 == 16
    lea          dst8_reg, [dst_reg + stride_reg*8]
%endif

    ; read 16 rows of 8px each, interleave
    movh         m0, [dst_reg +mstride_reg*4]
    movh         m1, [dst8_reg+mstride_reg*4]
    movh         m2, [dst_reg +mstride_reg*2]
    movh         m5, [dst8_reg+mstride_reg*2]
    movh         m3, [dst_reg +mstride_reg]
    movh         m6, [dst8_reg+mstride_reg]
    movh         m4, [dst_reg]
    movh         m7, [dst8_reg]
    punpcklbw    m0, m1          ; A/I
    punpcklbw    m2, m5          ; C/K
    punpcklbw    m3, m6          ; D/L
    punpcklbw    m4, m7          ; E/M
    add          dst8_reg, stride_reg
    movh         m1, [dst2_reg+mstride_reg*4]
    movh         m6, [dst8_reg+mstride_reg*4]
    movh         m5, [dst2_reg]
    movh         m7, [dst8_reg]
    punpcklbw    m1, m6          ; B/J
    punpcklbw    m5, m7          ; F/N
    movh         m6, [dst2_reg+ stride_reg]
    movh         m7, [dst8_reg+ stride_reg]
    punpcklbw    m6, m7          ; G/O

    ; 8x16 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
%ifdef m8
    SWAP         1, 8
%else
    mova         q0backup, m1
%endif
    movh         m7, [dst2_reg+ stride_reg*2]
    movh         m1, [dst8_reg+ stride_reg*2]
    punpcklbw    m7, m1          ; H/P
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY   dq, 0, 4, 1     ; p3/p2
    SBUTTERFLY   dq, 2, 6, 1     ; q0/q1
    SBUTTERFLY   dq, 3, 7, 1     ; q2/q3
%ifdef m8
    SWAP         1, 8
    SWAP         2, 8
%else
    mova         m1, q0backup
    mova         q0backup, m2    ; store q0
%endif
    SBUTTERFLY   dq, 1, 5, 2     ; p1/p0
%ifdef m12
    SWAP         5, 12
%else
    mova         p0backup, m5    ; store p0
%endif
    SWAP         1, 4
    SWAP         2, 4
    SWAP         6, 3
    SWAP         5, 3
%endif
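
    ; At this point the lanes line up as m0=p3, m1=p2, m2=p1, m5=q1,
    ; m6=q2, m7=q3; p0/q0 are either still in memory (v) or parked in
    ; p0backup/q0backup (m12/m8 on x86-64) until needed below.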
    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
    mova         m4, m1
    SWAP         4, 1
    psubusb      m4, m0          ; p2-p3
    psubusb      m0, m1          ; p3-p2
    por          m0, m4          ; abs(p3-p2)

    mova         m4, m2
    SWAP         4, 2
    psubusb      m4, m1          ; p1-p2
    mova         p2backup, m1
    psubusb      m1, m2          ; p2-p1
    por          m1, m4          ; abs(p2-p1)

    mova         m4, m6
    SWAP         4, 6
    psubusb      m4, m7          ; q2-q3
    psubusb      m7, m6          ; q3-q2
    por          m7, m4          ; abs(q3-q2)

    mova         m4, m5
    SWAP         4, 5
    psubusb      m4, m6          ; q1-q2
    mova         q2backup, m6
    psubusb      m6, m5          ; q2-q1
    por          m6, m4          ; abs(q2-q1)
%ifidn %1, mmx
    mova         m4, flim_I
    pxor         m3, m3
    psubusb      m0, m4
    psubusb      m1, m4
    psubusb      m7, m4
    psubusb      m6, m4
    pcmpeqb      m0, m3          ; abs(p3-p2) <= I
    pcmpeqb      m1, m3          ; abs(p2-p1) <= I
    pcmpeqb      m7, m3          ; abs(q3-q2) <= I
    pcmpeqb      m6, m3          ; abs(q2-q1) <= I
    pand         m0, m1
    pand         m7, m6
    pand         m0, m7
%else ; mmxext/sse2
    pmaxub       m0, m1
    pmaxub       m6, m7
    pmaxub       m0, m6
%endif
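
    ; Plain mmx has no pmaxub, so each "abs(x) <= I" is tested directly:
    ; psubusb by I saturates to zero exactly when x <= I, and pcmpeqb
    ; against zero turns that into a byte mask. mmxext+ instead folds
    ; the absolute differences with pmaxub and compares only once,
    ; later, after abs(p1-p0) and abs(q1-q0) have joined the maximum.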
    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP         7, 3            ; now m7 is zero
%ifidn %2, v
    movrow       m3, [dst_reg +mstride_reg] ; p0
%if mmsize == 16 && %4 == 8
    movhps       m3, [dst8_reg+mstride_reg]
%endif
%elifdef m12
    SWAP         3, 12
%else
    mova         m3, p0backup
%endif

    mova         m1, m2
    SWAP         1, 2
    mova         m6, m3
    SWAP         3, 6
    psubusb      m1, m3          ; p1-p0
    psubusb      m6, m2          ; p0-p1
    por          m1, m6          ; abs(p1-p0)
%ifidn %1, mmx
    mova         m6, m1
    psubusb      m1, m4
    psubusb      m6, hev_thr
    pcmpeqb      m1, m7          ; abs(p1-p0) <= I
    pcmpeqb      m6, m7          ; abs(p1-p0) <= hev_thresh
    pand         m0, m1
    mova         mask_res, m6
%else ; mmxext/sse2
    pmaxub       m0, m1          ; max_I
    SWAP         1, 4            ; max_hev_thresh
%endif

    SWAP         6, 4            ; now m6 is I
%ifidn %2, v
    movrow       m4, [dst_reg]   ; q0
%if mmsize == 16 && %4 == 8
    movhps       m4, [dst8_reg]
%endif
%elifdef m8
    SWAP         4, 8
%else
    mova         m4, q0backup
%endif
    mova         m1, m4
    SWAP         1, 4
    mova         m7, m5
    SWAP         7, 5
    psubusb      m1, m5          ; q0-q1
    psubusb      m7, m4          ; q1-q0
    por          m1, m7          ; abs(q1-q0)
%ifidn %1, mmx
    mova         m7, m1
    psubusb      m1, m6
    psubusb      m7, hev_thr
    pxor         m6, m6
    pcmpeqb      m1, m6          ; abs(q1-q0) <= I
    pcmpeqb      m7, m6          ; abs(q1-q0) <= hev_thresh
    mova         m6, mask_res
    pand         m0, m1          ; abs([pq][321]-[pq][210]) <= I
    pand         m6, m7
%else ; mmxext/sse2
    pxor         m7, m7
    pmaxub       m0, m1
    pmaxub       m6, m1
    psubusb      m0, flim_I
    psubusb      m6, hev_thr
    pcmpeqb      m0, m7          ; max(abs(..)) <= I
    pcmpeqb      m6, m7          ; !(max(abs..) > thresh)
%endif
%ifdef m12
    SWAP         6, 12
%else
    mova         mask_res, m6    ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
%endif
    ; simple_limit
    mova         m1, m3
    SWAP         1, 3
    mova         m6, m4          ; keep copies of p0/q0 around for later use
    SWAP         6, 4
    psubusb      m1, m4          ; p0-q0
    psubusb      m6, m3          ; q0-p0
    por          m1, m6          ; abs(q0-p0)
    paddusb      m1, m1          ; m1=2*abs(q0-p0)

    mova         m7, m2
    SWAP         7, 2
    mova         m6, m5
    SWAP         6, 5
    psubusb      m7, m5          ; p1-q1
    psubusb      m6, m2          ; q1-p1
    por          m7, m6          ; abs(q1-p1)
    pxor         m6, m6
    pand         m7, [pb_FE]
    psrlq        m7, 1           ; abs(q1-p1)/2
    paddusb      m7, m1          ; abs(q0-p0)*2+abs(q1-p1)/2
    psubusb      m7, flim_E
    pcmpeqb      m7, m6          ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand         m0, m7          ; normal_limit result
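
    ; simple_limit test, per byte: 2*abs(q0-p0) + abs(q1-p1)/2 <= E.
    ; pb_FE clears each byte's lsb before the quadword psrlq by 1, so
    ; no bit can leak in from the neighbouring byte.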
    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
    mova         m8, [pb_80]
%define pb_80_var m8
%else ; x86-32 or mmx/mmxext
%define pb_80_var [pb_80]
%endif
    mova         m1, m4
    mova         m7, m3
    pxor         m1, pb_80_var
    pxor         m7, pb_80_var
    psubsb       m1, m7          ; (signed) q0-p0
    mova         m6, m2
    mova         m7, m5
    pxor         m6, pb_80_var
    pxor         m7, pb_80_var
    psubsb       m6, m7          ; (signed) p1-q1
    mova         m7, mask_res
    paddsb       m6, m1
    paddsb       m6, m1
    paddsb       m6, m1          ; 3*(q0-p0)+(p1-q1)
    pand         m6, m0
%ifdef m8
    mova         lim_res, m6     ; 3*(q0-p0)+(p1-q1) masked for filter_mbedge
    pand         lim_res, m7
%else
    mova         m0, m6
    pand         m0, m7
    mova         lim_res, m0
%endif
    pandn        m7, m6          ; 3*(q0-p0)+(p1-q1) masked for filter_common
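
    ; The masked filter value w is split by high-edge-variance: pixels
    ; flagged hev take the short filter_common path below (f1/f2 on
    ; p0/q0 only), while !hev pixels keep w in lim_res for the wider
    ; filter_mbedge taps (27/18/9) further down.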
    mova         m1, [pb_F8]
    mova         m6, m7
    paddsb       m7, [pb_3]
    paddsb       m6, [pb_4]
    pand         m7, m1
    pand         m6, m1
    pxor         m1, m1
    pxor         m0, m0
    pcmpgtb      m1, m7
    psubb        m0, m7
    psrlq        m7, 3           ; +f2
    psrlq        m0, 3           ; -f2
    pand         m0, m1
    pandn        m1, m7
    psubusb      m3, m0
    paddusb      m3, m1          ; p0+f2
    pxor         m1, m1
    pxor         m0, m0
    pcmpgtb      m0, m6
    psubb        m1, m6
    psrlq        m6, 3           ; +f1
    psrlq        m1, 3           ; -f1
    pand         m1, m0
    pandn        m0, m6
    psubusb      m4, m0
    paddusb      m4, m1          ; q0-f1
    ; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)
%if ssse3_or_higher
    mova         m7, [pb_1]
%else
    mova         m7, [pw_63]
%endif
%ifdef m8
    SWAP         1, 8
%else
    mova         m1, lim_res
%endif
    pxor         m0, m0
    mova         m6, m1
    pcmpgtb      m0, m1          ; which are negative
%if ssse3_or_higher
    punpcklbw    m6, m7          ; interleave with "1" for rounding
    punpckhbw    m1, m7
%else
    punpcklbw    m6, m0          ; signed byte->word
    punpckhbw    m1, m0
%endif
    mova         lim_sign, m0
%if ssse3_or_higher
    mova         m7, [pb_27_63]
%ifndef m8
    mova         lim_res, m1
%endif
%ifdef m10
    SWAP         0, 10           ; don't lose lim_sign copy
%endif
    mova         m0, m7
    pmaddubsw    m7, m6
    SWAP         6, 7
    pmaddubsw    m0, m1
    SWAP         1, 0
%ifdef m10
    SWAP         0, 10
%else
    mova         m0, lim_sign
%endif
%else
    mova         mask_res, m6    ; backup for later in filter
    mova         lim_res, m1
    pmullw       m6, [pw_27]
    pmullw       m1, [pw_27]
    paddw        m6, m7
    paddw        m1, m7
%endif
    psraw        m6, 7
    psraw        m1, 7
    packsswb     m6, m1          ; a0
    pxor         m1, m1
    psubb        m1, m6
    pand         m1, m0          ; -a0
    pandn        m0, m6          ; +a0
%if ssse3_or_higher
    mova         m6, [pb_18_63]  ; pipelining
%endif
    psubusb      m3, m1
    paddusb      m4, m1
    paddusb      m3, m0          ; p0+a0
    psubusb      m4, m0          ; q0-a0
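
    ; a0 = (27*w + 63) >> 7, applied to p0/q0. On ssse3+ the word
    ; multiply-add is fused: bytes were interleaved with 1 above, so
    ; pmaddubsw against pb_27_63 yields 27*w + 63*1 in one step;
    ; pre-ssse3 it is an explicit pmullw by pw_27 plus the pw_63
    ; rounding constant.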
%if ssse3_or_higher
    SWAP         6, 7
%ifdef m10
    SWAP         1, 10
%else
    mova         m1, lim_res
%endif
    mova         m0, m7
    pmaddubsw    m7, m6
    SWAP         6, 7
    pmaddubsw    m0, m1
    SWAP         1, 0
%ifdef m10
    SWAP         0, 10
%endif
    mova         m0, lim_sign
%else
    mova         m6, mask_res
    mova         m1, lim_res
    pmullw       m6, [pw_18]
    pmullw       m1, [pw_18]
    paddw        m6, m7
    paddw        m1, m7
%endif
    mova         m0, lim_sign
    psraw        m6, 7
    psraw        m1, 7
    packsswb     m6, m1          ; a1
    pxor         m1, m1
    psubb        m1, m6
    pand         m1, m0          ; -a1
    pandn        m0, m6          ; +a1
%if ssse3_or_higher
    mova         m6, [pb_9_63]
%endif
    psubusb      m2, m1
    paddusb      m5, m1
    paddusb      m2, m0          ; p1+a1
    psubusb      m5, m0          ; q1-a1
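
    ; a1 = (18*w + 63) >> 7, applied to p1/q1 (pb_18_63 / pw_18).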
%if ssse3_or_higher
    SWAP         6, 7
%ifdef m10
    SWAP         1, 10
%else
    mova         m1, lim_res
%endif
    mova         m0, m7
    pmaddubsw    m7, m6
    SWAP         6, 7
    pmaddubsw    m0, m1
    SWAP         1, 0
%else
%ifdef m8
    SWAP         6, 12
    SWAP         1, 8
%else
    mova         m6, mask_res
    mova         m1, lim_res
%endif
    pmullw       m6, [pw_9]
    pmullw       m1, [pw_9]
    paddw        m6, m7
    paddw        m1, m7
%endif
%ifdef m9
    SWAP         7, 9
%else
    mova         m7, lim_sign
%endif
    psraw        m6, 7
    psraw        m1, 7
    packsswb     m6, m1          ; a2
    pxor         m0, m0
    psubb        m0, m6
    pand         m0, m7          ; -a2
    pandn        m7, m6          ; +a2
%ifdef m8
    SWAP         1, 13
    SWAP         6, 14
%else
    mova         m1, p2backup
    mova         m6, q2backup
%endif
    psubusb      m1, m0
    paddusb      m6, m0
    paddusb      m1, m7          ; p2+a2
    psubusb      m6, m7          ; q2-a2
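
    ; a2 = (9*w + 63) >> 7, applied to the outermost pair p2/q2.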
    ; store
%ifidn %2, v
    movrow [dst2_reg+mstride_reg*4], m1
    movrow [dst_reg +mstride_reg*2], m2
    movrow [dst_reg +mstride_reg ], m3
    movrow [dst_reg], m4
    movrow [dst2_reg], m5
    movrow [dst2_reg+ stride_reg ], m6
%if mmsize == 16 && %4 == 8
    add          dst8_reg, mstride_reg
    movhps [dst8_reg+mstride_reg*2], m1
    movhps [dst8_reg+mstride_reg ], m2
    movhps [dst8_reg], m3
    add          dst8_reg, stride_reg
    movhps [dst8_reg], m4
    movhps [dst8_reg+ stride_reg ], m5
    movhps [dst8_reg+ stride_reg*2], m6
%endif
%else ; h
    inc          dst_reg
    inc          dst2_reg

    ; 4x8/16 transpose
    TRANSPOSE4x4B 1, 2, 3, 4, 0
    SBUTTERFLY   bw, 5, 6, 0
%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D   1, 2, 3, 4, dst_reg, dst2_reg, mstride_reg, stride_reg
    add          dst_reg, 4
    WRITE_2x4W   m5, m6, dst2_reg, dst_reg, mstride_reg, stride_reg
%else ; sse2 (h)
    lea          dst8_reg, [dst8_reg+mstride_reg+1]
    WRITE_4x4D   1, 2, 3, 4, dst_reg, dst2_reg, dst8_reg, mstride_reg, stride_reg, %4
    lea          dst_reg, [dst2_reg+mstride_reg+4]
    lea          dst8_reg, [dst8_reg+mstride_reg+4]
%ifidn %1, sse4
    add          dst2_reg, 4
%endif
    WRITE_8W     m5, dst2_reg, dst_reg, mstride_reg, stride_reg
%ifidn %1, sse4
    lea          dst2_reg, [dst8_reg+ stride_reg]
%endif
    WRITE_8W     m6, dst2_reg, dst8_reg, mstride_reg, stride_reg
%endif
%endif

%if mmsize == 8
%if %4 == 8 ; chroma
%ifidn %2, h
    sub          dst_reg, 5
%endif
    cmp          dst_reg, dst8_reg
    mov          dst_reg, dst8_reg
    jnz .next8px
%else
%ifidn %2, h
    lea          dst_reg, [dst_reg + stride_reg*8-5]
%else ; v
    add          dst_reg, 8
%endif
    dec          cnt_reg
    jg .next8px
%endif
%endif

%ifndef m8 ; sse2 on x86-32 or mmx/mmxext
    mov          rsp, stack_reg   ; restore stack pointer
%endif
    RET
%endmacro
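
; MBEDGE_LOOPFILTER arguments mirror INNER_LOOPFILTER: %1 = instruction
; set, %2 = direction (v/h), %3 = number of GPRs, %4 = block width
; (16 = luma, 8 = chroma), %5 = number of XMM registers.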
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
MBEDGE_LOOPFILTER mmx,    v, 6, 16, 0
MBEDGE_LOOPFILTER mmx,    h, 6, 16, 0
MBEDGE_LOOPFILTER mmx,    v, 6,  8, 0
MBEDGE_LOOPFILTER mmx,    h, 6,  8, 0

%define SPLATB_REG SPLATB_REG_MMXEXT
MBEDGE_LOOPFILTER mmxext, v, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, h, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, v, 6,  8, 0
MBEDGE_LOOPFILTER mmxext, h, 6,  8, 0

INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
%define WRITE_8W   WRITE_8W_SSE2
MBEDGE_LOOPFILTER sse2,   v, 5, 16, 15
%ifdef m8
MBEDGE_LOOPFILTER sse2,   h, 5, 16, 15
%else
MBEDGE_LOOPFILTER sse2,   h, 6, 16, 15
%endif
MBEDGE_LOOPFILTER sse2,   v, 6,  8, 15
MBEDGE_LOOPFILTER sse2,   h, 6,  8, 15

%define SPLATB_REG SPLATB_REG_SSSE3
MBEDGE_LOOPFILTER ssse3,  v, 5, 16, 15
%ifdef m8
MBEDGE_LOOPFILTER ssse3,  h, 5, 16, 15
%else
MBEDGE_LOOPFILTER ssse3,  h, 6, 16, 15
%endif
MBEDGE_LOOPFILTER ssse3,  v, 6,  8, 15
MBEDGE_LOOPFILTER ssse3,  h, 6,  8, 15

%define WRITE_8W   WRITE_8W_SSE4
%ifdef m8
MBEDGE_LOOPFILTER sse4,   h, 5, 16, 15
%else
MBEDGE_LOOPFILTER sse4,   h, 6, 16, 15
%endif
MBEDGE_LOOPFILTER sse4,   h, 6,  8, 15