;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Oskar Arvidsson <oskar@irock.se>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION_RODATA

pw_pixel_max: times 8 dw ((1 << 10)-1)

SECTION .text

cextern pw_2
cextern pw_3
cextern pw_4

; out: %4 = |%1-%2|-%3
; clobbers: %5
%macro ABS_SUB 5
    psubusw %5, %2, %1
    psubusw %4, %1, %2
    por     %4, %5
    psubw   %4, %3
%endmacro

; out: %4 = |%1-%2|<%3
%macro DIFF_LT 5
    psubusw %4, %2, %1
    psubusw %5, %1, %2
    por     %5, %4 ; |%1-%2|
    pxor    %4, %4
    psubw   %5, %3 ; |%1-%2|-%3
    pcmpgtw %4, %5 ; 0 > |%1-%2|-%3
%endmacro
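
; out: %1 = splatted alpha, %2 = splatted beta (loaded from the GPRs %3/%4)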
%macro LOAD_AB 4
    movd    %1, %3
    movd    %2, %4
    SPLATW  %1, %1
    SPLATW  %2, %2
%endmacro

; in: %2=tc reg
; out: %1=splatted tc
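; note: duplicating each tc0 byte into both halves of a word and shifting
; right by 6 sign-extends tc0 and multiplies non-negative values by 4
; (1 << (10-8)), mapping the 8-bit tc0 table into the 10-bit pixel range
; while keeping tc0 == -1 negative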
%macro LOAD_TC 2
    movd        %1, [%2]
    punpcklbw   %1, %1
%if mmsize == 8
    pshufw      %1, %1, 0
%else
    pshuflw     %1, %1, 01010000b
    pshufd      %1, %1, 01010000b
%endif
    psraw       %1, 6
%endmacro

; in: %1=p1, %2=p0, %3=q0, %4=q1
;     %5=alpha, %6=beta, %7-%9=tmp
; out: %7=mask
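; %7 = (|p0-q0| < alpha) & (|p1-p0| < beta) & (|q1-q0| < beta); ANDing the
; ABS_SUB results leaves the sign bit set only where every difference is
; below its threshold, and pcmpgtw against zero expands that sign bit into
; a full word mask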
%macro LOAD_MASK 9
    ABS_SUB %2, %3, %5, %8, %7 ; |p0-q0| - alpha
    ABS_SUB %1, %2, %6, %9, %7 ; |p1-p0| - beta
    pand    %8, %9
    ABS_SUB %3, %4, %6, %9, %7 ; |q1-q0| - beta
    pxor    %7, %7
    pand    %8, %9
    pcmpgtw %7, %8
%endmacro

; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', %2=q0'
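; standard H.264 p0/q0 filter:
;   delta = clip((((q0-p0) << 2) + (p1-q1) + 4) >> 3, -tc, tc)
;   p0' = clip(p0 + delta, 0, pixel_max)
;   q0' = clip(q0 - delta, 0, pixel_max)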
%macro DEBLOCK_P0_Q0 7
    psubw   %3, %4
    pxor    %7, %7
    paddw   %3, [pw_4]
    psubw   %7, %5
    psubw   %6, %2, %1
    psllw   %6, 2
    paddw   %3, %6
    psraw   %3, 3
    mova    %6, [pw_pixel_max]
    CLIPW   %3, %7, %5
    pxor    %7, %7
    paddw   %1, %3
    psubw   %2, %3
    CLIPW   %1, %7, %6
    CLIPW   %2, %7, %6
%endmacro

; in: %1=x2, %2=x1, %3=p0, %4=q0, %5=mask&tc, %6=tmp
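; p1/q1 filter: with %1=p2 and %2=p1 (or the q-side equivalents),
;   p1' = p1 + clip(((p2 + ((p0+q0+1) >> 1)) >> 1) - p1, -tc, tc)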
%macro LUMA_Q1 6
    pavgw   %6, %3, %4 ; (p0+q0+1)>>1
    paddw   %1, %6
    pxor    %6, %6
    psraw   %1, 1
    psubw   %6, %5
    psubw   %1, %2
    CLIPW   %1, %6, %5
    paddw   %1, %2
%endmacro
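
; in: m5=x2 (p2 or q2), %1=x0 (p0 or q0), %2=x1 (p1 or q1), m1/m2=p0/q0,
;     m7=mask, tcm/bm on the stack (inferred from the callers below)
; out: m5=filtered x1, %3=|x2-x0|<beta mask (kept for the tc adjustment)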
%macro LUMA_DEBLOCK_ONE 3
    DIFF_LT m5, %1, bm, m4, m6
    pxor    m6, m6
    mova    %3, m4
    pcmpgtw m6, tcm
    pand    m4, tcm
    pandn   m6, m7
    pand    m4, m6
    LUMA_Q1 m5, %2, m1, m2, m4, m6
%endmacro

%macro LUMA_H_STORE 2
%if mmsize == 8
    movq   [r0-4], m0
    movq   [r0+r1-4], m1
    movq   [r0+r1*2-4], m2
    movq   [r0+%2-4], m3
%else
    movq   [r0-4], m0
    movhps [r0+r1-4], m0
    movq   [r0+r1*2-4], m1
    movhps [%1-4], m1
    movq   [%1+r1-4], m2
    movhps [%1+r1*2-4], m2
    movq   [%1+%2-4], m3
    movhps [%1+r1*4-4], m3
%endif
%endmacro

%macro DEBLOCK_LUMA 0
;-----------------------------------------------------------------------------
; void deblock_v_luma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
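; alpha and beta arrive in the 8-bit range; the shl by 2 below rescales them
; by 4 (1 << (10-8)) to match 10-bit pixel values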
cglobal deblock_v_luma_10, 5,5,8*(mmsize/16)
    %assign pad 5*mmsize+12-(stack_offset&15)
    %define tcm [rsp]
    %define ms1 [rsp+mmsize]
    %define ms2 [rsp+mmsize*2]
    %define am  [rsp+mmsize*3]
    %define bm  [rsp+mmsize*4]
    SUB        rsp, pad
    shl        r2d, 2
    shl        r3d, 2
    LOAD_AB    m4, m5, r2d, r3d
    mov        r3, 32/mmsize
    mov        r2, r0
    sub        r0, r1
    mova       am, m4
    sub        r0, r1
    mova       bm, m5
    sub        r0, r1
.loop:
    mova       m0, [r0+r1]
    mova       m1, [r0+r1*2]
    mova       m2, [r2]
    mova       m3, [r2+r1]
    LOAD_MASK  m0, m1, m2, m3, am, bm, m7, m4, m6
    LOAD_TC    m6, r4
    mova       tcm, m6
    mova       m5, [r0]
    LUMA_DEBLOCK_ONE m1, m0, ms1
    mova       [r0+r1], m5
    mova       m5, [r2+r1*2]
    LUMA_DEBLOCK_ONE m2, m3, ms2
    mova       [r2+r1], m5
    pxor       m5, m5
    mova       m6, tcm
    pcmpgtw    m5, tcm
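    ; subtracting the -1 masks saved by LUMA_DEBLOCK_ONE increments tc by 1
    ; for each side that passed its |x2-x0|<beta test; words where tc0 was
    ; negative are zeroed, which clips delta to 0 and disables the filter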
    psubw      m6, ms1
    pandn      m5, m7
    psubw      m6, ms2
    pand       m5, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
    mova       [r0+r1*2], m1
    mova       [r2], m2
    add        r0, mmsize
    add        r2, mmsize
    add        r4, mmsize/8
    dec        r3
    jg .loop
    ADD        rsp, pad
    RET

cglobal deblock_h_luma_10, 5,6,8*(mmsize/16)
    %assign pad 7*mmsize+12-(stack_offset&15)
    %define tcm [rsp]
    %define ms1 [rsp+mmsize]
    %define ms2 [rsp+mmsize*2]
    %define p1m [rsp+mmsize*3]
    %define p2m [rsp+mmsize*4]
    %define am  [rsp+mmsize*5]
    %define bm  [rsp+mmsize*6]
    SUB        rsp, pad
    shl        r2d, 2
    shl        r3d, 2
    LOAD_AB    m4, m5, r2d, r3d
    mov        r3, r1
    mova       am, m4
    add        r3, r1
    mov        r5, 32/mmsize
    mova       bm, m5
    add        r3, r1
%if mmsize == 16
    mov        r2, r0
    add        r2, r3
%endif
.loop:
%if mmsize == 8
    movq       m2, [r0-8]     ; y q2 q1 q0
    movq       m7, [r0+0]
    movq       m5, [r0+r1-8]
    movq       m3, [r0+r1+0]
    movq       m0, [r0+r1*2-8]
    movq       m6, [r0+r1*2+0]
    movq       m1, [r0+r3-8]
    TRANSPOSE4x4W 2, 5, 0, 1, 4
    SWAP       2, 7
    movq       m7, [r0+r3]
    TRANSPOSE4x4W 2, 3, 6, 7, 4
%else
    movu       m5, [r0-8]     ; y q2 q1 q0 p0 p1 p2 x
    movu       m0, [r0+r1-8]
    movu       m2, [r0+r1*2-8]
    movu       m3, [r2-8]
    TRANSPOSE4x4W 5, 0, 2, 3, 6
    mova       tcm, m3
    movu       m4, [r2+r1-8]
    movu       m1, [r2+r1*2-8]
    movu       m3, [r2+r3-8]
    movu       m7, [r2+r1*4-8]
    TRANSPOSE4x4W 4, 1, 3, 7, 6
    mova       m6, tcm
    punpcklqdq m6, m7
    punpckhqdq m5, m4
    SBUTTERFLY qdq, 0, 1, 7
    SBUTTERFLY qdq, 2, 3, 7
%endif
    mova       p2m, m6
    LOAD_MASK  m0, m1, m2, m3, am, bm, m7, m4, m6
    LOAD_TC    m6, r4
    mova       tcm, m6
    LUMA_DEBLOCK_ONE m1, m0, ms1
    mova       p1m, m5
    mova       m5, p2m
    LUMA_DEBLOCK_ONE m2, m3, ms2
    mova       p2m, m5
    pxor       m5, m5
    mova       m6, tcm
    pcmpgtw    m5, tcm
    psubw      m6, ms1
    pandn      m5, m7
    psubw      m6, ms2
    pand       m5, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
    mova       m0, p1m
    mova       m3, p2m
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    LUMA_H_STORE r2, r3
    add        r4, mmsize/8
    lea        r0, [r0+r1*(mmsize/2)]
    lea        r2, [r2+r1*(mmsize/2)]
    dec        r5
    jg .loop
    ADD        rsp, pad
    RET
%endmacro

%if ARCH_X86_64
; in: m0=p1, m1=p0, m2=q0, m3=q1, m8=p2, m9=q2
;     m12=alpha, m13=beta
; out: m0=p1', m3=q1', m1=p0', m2=q0'
; clobbers: m4, m5, m6, m7, m10, m11, m14
%macro DEBLOCK_LUMA_INTER_SSE2 0
    LOAD_MASK  m0, m1, m2, m3, m12, m13, m7, m4, m6
    LOAD_TC    m6, r4
    DIFF_LT    m8, m1, m13, m10, m4
    DIFF_LT    m9, m2, m13, m11, m4
    pand       m6, m7
    mova       m14, m6
    pxor       m4, m4
    pcmpgtw    m6, m4
    pand       m6, m14
    mova       m5, m10
    pand       m5, m6
    LUMA_Q1    m8, m0, m1, m2, m5, m4
    mova       m5, m11
    pand       m5, m6
    LUMA_Q1    m9, m3, m1, m2, m5, m4
    pxor       m4, m4
    psubw      m6, m10
    pcmpgtw    m4, m14
    pandn      m4, m7
    psubw      m6, m11
    pand       m4, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m4, m5, m6
    SWAP       0, 8
    SWAP       3, 9
%endmacro

%macro DEBLOCK_LUMA_64 0
cglobal deblock_v_luma_10, 5,5,15
    %define p2 m8
    %define p1 m0
    %define p0 m1
    %define q0 m2
    %define q1 m3
    %define q2 m9
    %define mask0 m7
    %define mask1 m10
    %define mask2 m11
    shl        r2d, 2
    shl        r3d, 2
    LOAD_AB    m12, m13, r2d, r3d
    mov        r2, r0
    sub        r0, r1
    sub        r0, r1
    sub        r0, r1
    mov        r3, 2
.loop:
    mova       p2, [r0]
    mova       p1, [r0+r1]
    mova       p0, [r0+r1*2]
    mova       q0, [r2]
    mova       q1, [r2+r1]
    mova       q2, [r2+r1*2]
    DEBLOCK_LUMA_INTER_SSE2
    mova       [r0+r1], p1
    mova       [r0+r1*2], p0
    mova       [r2], q0
    mova       [r2+r1], q1
    add        r0, mmsize
    add        r2, mmsize
    add        r4, 2
    dec        r3
    jg .loop
    REP_RET

cglobal deblock_h_luma_10, 5,7,15
    shl        r2d, 2
    shl        r3d, 2
    LOAD_AB    m12, m13, r2d, r3d
    mov        r2, r1
    add        r2, r1
    add        r2, r1
    mov        r5, r0
    add        r5, r2
    mov        r6, 2
.loop:
    movu       m8, [r0-8]     ; y q2 q1 q0 p0 p1 p2 x
    movu       m0, [r0+r1-8]
    movu       m2, [r0+r1*2-8]
    movu       m9, [r5-8]
    movu       m5, [r5+r1-8]
    movu       m1, [r5+r1*2-8]
    movu       m3, [r5+r2-8]
    movu       m7, [r5+r1*4-8]
    TRANSPOSE4x4W 8, 0, 2, 9, 10
    TRANSPOSE4x4W 5, 1, 3, 7, 10
    punpckhqdq m8, m5
    SBUTTERFLY qdq, 0, 1, 10
    SBUTTERFLY qdq, 2, 3, 10
    punpcklqdq m9, m7
    DEBLOCK_LUMA_INTER_SSE2
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    LUMA_H_STORE r5, r2
    add        r4, 2
    lea        r0, [r0+r1*8]
    lea        r5, [r5+r1*8]
    dec        r6
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
DEBLOCK_LUMA_64
INIT_XMM avx
DEBLOCK_LUMA_64
%endif
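
; move %2 into %1; when %1 is a register this is a free register rename (SWAP)
; rather than an actual mova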
%macro SWAPMOVA 2
%ifid %1
    SWAP %1, %2
%else
    mova %1, %2
%endif
%endmacro

; in: t0-t2: tmp registers
;     %1=p0 %2=p1 %3=p2 %4=p3 %5=q0 %6=q1 %7=mask0
;     %8=mask1p %9=2 %10=p0' %11=p1' %12=p2'
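; strong (bS=4) intra luma filter, p side (the q side is filtered by calling
; it with the arguments mirrored):
;   p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;   p1' = (p2 + p1 + p0 + q0 + 2) >> 2
;   p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
; where mask1p is unset, p0' falls back to (2*p1 + p0 + q1 + 2) >> 2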
%macro LUMA_INTRA_P012 12 ; p0..p3 in memory
%if ARCH_X86_64
    paddw      t0, %3, %2
    mova       t2, %4
    paddw      t2, %3
%else
    mova       t0, %3
    mova       t2, %4
    paddw      t0, %2
    paddw      t2, %3
%endif
    paddw      t0, %1
    paddw      t2, t2
    paddw      t0, %5
    paddw      t2, %9
    paddw      t0, %9 ; (p2 + p1 + p0 + q0 + 2)
    paddw      t2, t0 ; (2*p3 + 3*p2 + p1 + p0 + q0 + 4)
    psrlw      t2, 3
    psrlw      t1, t0, 2
    psubw      t2, %3
    psubw      t1, %2
    pand       t2, %8
    pand       t1, %8
    paddw      t2, %3
    paddw      t1, %2
    SWAPMOVA   %11, t1
    psubw      t1, t0, %3
    paddw      t0, t0
    psubw      t1, %5
    psubw      t0, %3
    paddw      t1, %6
    paddw      t1, %2
    paddw      t0, %6
    psrlw      t1, 2 ; (2*p1 + p0 + q1 + 2)/4
    psrlw      t0, 3 ; (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4)>>3
    pxor       t0, t1
    pxor       t1, %1
    pand       t0, %8
    pand       t1, %7
    pxor       t0, t1
    pxor       t0, %1
    SWAPMOVA   %10, t0
    SWAPMOVA   %12, t2
%endmacro

%macro LUMA_INTRA_INIT 1
    %xdefine pad %1*mmsize+((gprsize*3) % mmsize)-(stack_offset&15)
    %define t0 m4
    %define t1 m5
    %define t2 m6
    %define t3 m7
    %assign i 4
%rep %1
    CAT_XDEFINE t, i, [rsp+mmsize*(i-4)]
    %assign i i+1
%endrep
    SUB    rsp, pad
%endmacro

; in: %1-%3=tmp, %4=p2, %5=q2
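; out: %1=mask1p (|p2-p0| < beta), %2=mask0, %3=mask1q (|q2-q0| < beta);
;      the mask1 tests are additionally gated by mask0 and |p0-q0| < alpha/4+2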
%macro LUMA_INTRA_INTER 5
    LOAD_AB   t0, t1, r2d, r3d
    mova      %1, t0
    LOAD_MASK m0, m1, m2, m3, %1, t1, t0, t2, t3
%if ARCH_X86_64
    mova      %2, t0 ; mask0
    psrlw     t3, %1, 2
%else
    mova      t3, %1
    mova      %2, t0 ; mask0
    psrlw     t3, 2
%endif
    paddw     t3, [pw_2] ; alpha/4+2
    DIFF_LT   m1, m2, t3, t2, t0 ; t2 = |p0-q0| < alpha/4+2
    pand      t2, %2
    mova      t3, %5 ; q2
    mova      %1, t2 ; mask1
    DIFF_LT   t3, m2, t1, t2, t0 ; t2 = |q2-q0| < beta
    pand      t2, %1
    mova      t3, %4 ; p2
    mova      %3, t2 ; mask1q
    DIFF_LT   t3, m1, t1, t2, t0 ; t2 = |p2-p0| < beta
    pand      t2, %1
    mova      %1, t2 ; mask1p
%endmacro
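
; load and transpose the pixels across the vertical edge at r0
; out: m0-m3 = p1 p0 q0 q1, t4-t7 = p3 p2 q2 q3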
%macro LUMA_H_INTRA_LOAD 0
%if mmsize == 8
    movu    t0, [r0-8]
    movu    t1, [r0+r1-8]
    movu    m0, [r0+r1*2-8]
    movu    m1, [r0+r4-8]
    TRANSPOSE4x4W 4, 5, 0, 1, 2
    mova    t4, t0 ; p3
    mova    t5, t1 ; p2
    movu    m2, [r0]
    movu    m3, [r0+r1]
    movu    t0, [r0+r1*2]
    movu    t1, [r0+r4]
    TRANSPOSE4x4W 2, 3, 4, 5, 6
    mova    t6, t0 ; q2
    mova    t7, t1 ; q3
%else
    movu    t0, [r0-8]
    movu    t1, [r0+r1-8]
    movu    m0, [r0+r1*2-8]
    movu    m1, [r0+r5-8]
    movu    m2, [r4-8]
    movu    m3, [r4+r1-8]
    movu    t2, [r4+r1*2-8]
    movu    t3, [r4+r5-8]
    TRANSPOSE8x8W 4, 5, 0, 1, 2, 3, 6, 7, t4, t5
    mova    t4, t0 ; p3
    mova    t5, t1 ; p2
    mova    t6, t2 ; q2
    mova    t7, t3 ; q3
%endif
%endmacro

; in: %1=q3 %2=q2' %3=q1' %4=q0' %5=p0' %6=p1' %7=p2' %8=p3 %9=tmp
  508. ; in: %1=q3 %2=q2' %3=q1' %4=q0' %5=p0' %6=p1' %7=p2' %8=p3 %9=tmp
%macro LUMA_H_INTRA_STORE 9
%if mmsize == 8
    TRANSPOSE4x4W %1, %2, %3, %4, %9
    movq       [r0-8], m%1
    movq       [r0+r1-8], m%2
    movq       [r0+r1*2-8], m%3
    movq       [r0+r4-8], m%4
    movq       m%1, %8
    TRANSPOSE4x4W %5, %6, %7, %1, %9
    movq       [r0], m%5
    movq       [r0+r1], m%6
    movq       [r0+r1*2], m%7
    movq       [r0+r4], m%1
%else
    TRANSPOSE2x4x4W %1, %2, %3, %4, %9
    movq       [r0-8], m%1
    movq       [r0+r1-8], m%2
    movq       [r0+r1*2-8], m%3
    movq       [r0+r5-8], m%4
    movhps     [r4-8], m%1
    movhps     [r4+r1-8], m%2
    movhps     [r4+r1*2-8], m%3
    movhps     [r4+r5-8], m%4
%ifnum %8
    SWAP       %1, %8
%else
    mova       m%1, %8
%endif
    TRANSPOSE2x4x4W %5, %6, %7, %1, %9
    movq       [r0], m%5
    movq       [r0+r1], m%6
    movq       [r0+r1*2], m%7
    movq       [r0+r5], m%1
    movhps     [r4], m%5
    movhps     [r4+r1], m%6
    movhps     [r4+r1*2], m%7
    movhps     [r4+r5], m%1
%endif
%endmacro

%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
%macro DEBLOCK_LUMA_INTRA_64 0
cglobal deblock_v_luma_intra_10, 4,7,16
    %define t0 m1
    %define t1 m2
    %define t2 m4
    %define p2 m8
    %define p1 m9
    %define p0 m10
    %define q0 m11
    %define q1 m12
    %define q2 m13
    %define aa m5
    %define bb m14
    lea        r4, [r1*4]
    lea        r5, [r1*3] ; 3*stride
    neg        r4
    add        r4, r0     ; pix-4*stride
    mov        r6, 2
    mova       m0, [pw_2]
    shl        r2d, 2
    shl        r3d, 2
    LOAD_AB    aa, bb, r2d, r3d
.loop:
    mova       p2, [r4+r1]
    mova       p1, [r4+2*r1]
    mova       p0, [r4+r5]
    mova       q0, [r0]
    mova       q1, [r0+r1]
    mova       q2, [r0+2*r1]
    LOAD_MASK  p1, p0, q0, q1, aa, bb, m3, t0, t1
    mova       t2, aa
    psrlw      t2, 2
    paddw      t2, m0 ; alpha/4+2
    DIFF_LT    p0, q0, t2, m6, t0 ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT    p2, p0, bb, t1, t0 ; t1 = |p2-p0| < beta
    DIFF_LT    q2, q0, bb, m7, t0 ; m7 = |q2-q0| < beta
    pand       m6, m3
    pand       m7, m6
    pand       m6, t1
    LUMA_INTRA_P012 p0, p1, p2, [r4], q0, q1, m3, m6, m0, [r4+r5], [r4+2*r1], [r4+r1]
    LUMA_INTRA_P012 q0, q1, q2, [r0+r5], p0, p1, m3, m7, m0, [r0], [r0+r1], [r0+2*r1]
    add        r0, mmsize
    add        r4, mmsize
    dec        r6
    jg .loop
    REP_RET

;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,16
    %define t0 m15
    %define t1 m14
    %define t2 m2
    %define q3 m5
    %define q2 m8
    %define q1 m9
    %define q0 m10
    %define p0 m11
    %define p1 m12
    %define p2 m13
    %define p3 m4
    %define spill [rsp]
    %assign pad 24-(stack_offset&15)
    SUB        rsp, pad
    lea        r4, [r1*4]
    lea        r5, [r1*3] ; 3*stride
    add        r4, r0     ; pix+4*stride
    mov        r6, 2
    mova       m0, [pw_2]
    shl        r2d, 2
    shl        r3d, 2
.loop:
    movu       q3, [r0-8]
    movu       q2, [r0+r1-8]
    movu       q1, [r0+r1*2-8]
    movu       q0, [r0+r5-8]
    movu       p0, [r4-8]
    movu       p1, [r4+r1-8]
    movu       p2, [r4+r1*2-8]
    movu       p3, [r4+r5-8]
    TRANSPOSE8x8W 5, 8, 9, 10, 11, 12, 13, 4, 1
    LOAD_AB    m1, m2, r2d, r3d
    LOAD_MASK  q1, q0, p0, p1, m1, m2, m3, t0, t1
    psrlw      m1, 2
    paddw      m1, m0 ; alpha/4+2
    DIFF_LT    p0, q0, m1, m6, t0 ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT    q2, q0, m2, t1, t0 ; t1 = |q2-q0| < beta
    DIFF_LT    p0, p2, m2, m7, t0 ; m7 = |p2-p0| < beta
    pand       m6, m3
    pand       m7, m6
    pand       m6, t1
    mova       spill, q3
    LUMA_INTRA_P012 q0, q1, q2, q3, p0, p1, m3, m6, m0, m5, m1, q2
    LUMA_INTRA_P012 p0, p1, p2, p3, q0, q1, m3, m7, m0, p0, m6, p2
    mova       m7, spill
    LUMA_H_INTRA_STORE 7, 8, 1, 5, 11, 6, 13, 4, 14
    lea        r0, [r0+r1*8]
    lea        r4, [r4+r1*8]
    dec        r6
    jg .loop
    ADD        rsp, pad
    RET
%endmacro

INIT_XMM sse2
DEBLOCK_LUMA_INTRA_64
INIT_XMM avx
DEBLOCK_LUMA_INTRA_64
%endif

%macro DEBLOCK_LUMA_INTRA 0
;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_intra_10, 4,7,8*(mmsize/16)
    LUMA_INTRA_INIT 3
    lea        r4, [r1*4]
    lea        r5, [r1*3]
    neg        r4
    add        r4, r0
    mov        r6, 32/mmsize
    shl        r2d, 2
    shl        r3d, 2
.loop:
    mova       m0, [r4+r1*2] ; p1
    mova       m1, [r4+r5]   ; p0
    mova       m2, [r0]      ; q0
    mova       m3, [r0+r1]   ; q1
    LUMA_INTRA_INTER t4, t5, t6, [r4+r1], [r0+r1*2]
    LUMA_INTRA_P012 m1, m0, t3, [r4], m2, m3, t5, t4, [pw_2], [r4+r5], [r4+2*r1], [r4+r1]
    mova       t3, [r0+r1*2] ; q2
    LUMA_INTRA_P012 m2, m3, t3, [r0+r5], m1, m0, t5, t6, [pw_2], [r0], [r0+r1], [r0+2*r1]
    add        r0, mmsize
    add        r4, mmsize
    dec        r6
    jg .loop
    ADD        rsp, pad
    RET

;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,8*(mmsize/16)
    LUMA_INTRA_INIT 8
%if mmsize == 8
    lea        r4, [r1*3]
    mov        r5, 32/mmsize
%else
    lea        r4, [r1*4]
    lea        r5, [r1*3] ; 3*stride
    add        r4, r0     ; pix+4*stride
    mov        r6, 32/mmsize
%endif
    shl        r2d, 2
    shl        r3d, 2
.loop:
    LUMA_H_INTRA_LOAD
    LUMA_INTRA_INTER t8, t9, t10, t5, t6
    LUMA_INTRA_P012 m1, m0, t3, t4, m2, m3, t9, t8, [pw_2], t8, t5, t11
    mova       t3, t6 ; q2
    LUMA_INTRA_P012 m2, m3, t3, t7, m1, m0, t9, t10, [pw_2], m4, t6, m5
    mova       m2, t4
    mova       m0, t11
    mova       m1, t5
    mova       m3, t8
    mova       m6, t6
    LUMA_H_INTRA_STORE 2, 0, 1, 3, 4, 6, 5, t7, 7
    lea        r0, [r0+r1*(mmsize/2)]
%if mmsize == 8
    dec        r5
%else
    lea        r4, [r4+r1*(mmsize/2)]
    dec        r6
%endif
    jg .loop
    ADD        rsp, pad
    RET
%endmacro

%if ARCH_X86_64 == 0
INIT_MMX mmx2
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
INIT_XMM sse2
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
INIT_XMM avx
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
%endif

; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', %2=q0'
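; chroma intra filter:
;   p0' = (2*p1 + p0 + q1 + 2) >> 2
;   q0' = (2*q1 + q0 + p1 + 2) >> 2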
%macro CHROMA_DEBLOCK_P0_Q0_INTRA 7
    mova    %6, [pw_2]
    paddw   %6, %3
    paddw   %6, %4
    paddw   %7, %6, %2
    paddw   %6, %1
    paddw   %6, %3
    paddw   %7, %4
    psraw   %6, 2
    psraw   %7, 2
    psubw   %6, %1
    psubw   %7, %2
    pand    %6, %5
    pand    %7, %5
    paddw   %1, %6
    paddw   %2, %7
%endmacro

%macro CHROMA_V_LOAD 1
    mova    m0, [r0]    ; p1
    mova    m1, [r0+r1] ; p0
    mova    m2, [%1]    ; q0
    mova    m3, [%1+r1] ; q1
%endmacro

%macro CHROMA_V_STORE 0
    mova [r0+1*r1], m1
    mova [r0+2*r1], m2
%endmacro
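
; splat each tc0 byte over a pair of words (one tc0 value per two chroma
; pixels) and scale it into the 10-bit range; the arithmetic shift keeps
; tc0 == -1 negative, so the clamp in the caller still disables filtering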
%macro CHROMA_V_LOAD_TC 2
    movd        %1, [%2]
    punpcklbw   %1, %1
    punpcklwd   %1, %1
    psraw       %1, 6
%endmacro

%macro DEBLOCK_CHROMA 0
;-----------------------------------------------------------------------------
; void deblock_v_chroma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_10, 5,7-(mmsize/16),8*(mmsize/16)
    mov        r5, r0
    sub        r0, r1
    sub        r0, r1
    shl        r2d, 2
    shl        r3d, 2
%if mmsize < 16
    mov        r6, 16/mmsize
.loop:
%endif
    CHROMA_V_LOAD r5
    LOAD_AB    m4, m5, r2d, r3d
    LOAD_MASK  m0, m1, m2, m3, m4, m5, m7, m6, m4
    pxor       m4, m4
    CHROMA_V_LOAD_TC m6, r4
    psubw      m6, [pw_3]
    pmaxsw     m6, m4
    pand       m7, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
    CHROMA_V_STORE
%if mmsize < 16
    add        r0, mmsize
    add        r5, mmsize
    add        r4, mmsize/4
    dec        r6
    jg .loop
    REP_RET
%else
    RET
%endif

;-----------------------------------------------------------------------------
; void deblock_v_chroma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_10, 4,6-(mmsize/16),8*(mmsize/16)
    mov        r4, r0
    sub        r0, r1
    sub        r0, r1
    shl        r2d, 2
    shl        r3d, 2
%if mmsize < 16
    mov        r5, 16/mmsize
.loop:
%endif
    CHROMA_V_LOAD r4
    LOAD_AB    m4, m5, r2d, r3d
    LOAD_MASK  m0, m1, m2, m3, m4, m5, m7, m6, m4
    CHROMA_DEBLOCK_P0_Q0_INTRA m1, m2, m0, m3, m7, m5, m6
    CHROMA_V_STORE
%if mmsize < 16
    add        r0, mmsize
    add        r4, mmsize
    dec        r5
    jg .loop
    REP_RET
%else
    RET
%endif
%endmacro

%if ARCH_X86_64 == 0
INIT_MMX mmx2
DEBLOCK_CHROMA
%endif
INIT_XMM sse2
DEBLOCK_CHROMA
INIT_XMM avx
DEBLOCK_CHROMA