;*****************************************************************************
;* MMX/SSE2/AVX-optimized H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*          Oskar Arvidsson <oskar@irock.se>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION .text

cextern pb_0
cextern pb_1
cextern pb_3
cextern pb_A1

; expands to [base],...,[base+7*stride]
%define PASS8ROWS(base, base3, stride, stride3) \
    [base], [base+stride], [base+stride*2], [base3], \
    [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]

%define PASS8ROWS(base, base3, stride, stride3, offset) \
    PASS8ROWS(base+offset, base3+offset, stride, stride3)
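; e.g. with base3 = p+3*s and stride3 = 3*s, PASS8ROWS(p, p+3*s, s, 3*s)
; expands to the eight row addresses
; [p], [p+s], [p+2*s], [p+3*s], [p+4*s], [p+5*s], [p+6*s], [p+7*s]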
; in: 8 rows of 4 bytes in %4..%11
; out: 4 rows of 8 bytes in m0..m3
%macro TRANSPOSE4x8_LOAD 11
    movh      m0, %4
    movh      m2, %5
    movh      m1, %6
    movh      m3, %7
    punpckl%1 m0, m2
    punpckl%1 m1, m3
    mova      m2, m0
    punpckl%2 m0, m1
    punpckh%2 m2, m1

    movh      m4, %8
    movh      m6, %9
    movh      m5, %10
    movh      m7, %11
    punpckl%1 m4, m6
    punpckl%1 m5, m7
    mova      m6, m4
    punpckl%2 m4, m5
    punpckh%2 m6, m5

    punpckh%3 m1, m0, m4
    punpckh%3 m3, m2, m6
    punpckl%3 m0, m4
    punpckl%3 m2, m6
%endmacro
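; three interleave passes: %1 (bytes) merges row pairs, %2 (words) merges
; pairs of pairs, and %3 (dwords) completes the rotation, so with bw/wd/dq
; eight 4-byte rows come out as four 8-byte rows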
; in: 4 rows of 8 bytes in m0..m3
; out: 8 rows of 4 bytes in %1..%8
%macro TRANSPOSE8x4B_STORE 8
    punpckhdq m4, m0, m0
    punpckhdq m5, m1, m1
    punpckhdq m6, m2, m2

    punpcklbw m0, m1
    punpcklbw m2, m3
    punpcklwd m1, m0, m2
    punpckhwd m0, m2
    movh      %1, m1
    punpckhdq m1, m1
    movh      %2, m1
    movh      %3, m0
    punpckhdq m0, m0
    movh      %4, m0

    punpckhdq m3, m3
    punpcklbw m4, m5
    punpcklbw m6, m3
    punpcklwd m5, m4, m6
    punpckhwd m4, m6
    movh      %5, m5
    punpckhdq m5, m5
    movh      %6, m5
    movh      %7, m4
    punpckhdq m4, m4
    movh      %8, m4
%endmacro

%macro TRANSPOSE4x8B_LOAD 8
    TRANSPOSE4x8_LOAD bw, wd, dq, %1, %2, %3, %4, %5, %6, %7, %8
%endmacro

%macro SBUTTERFLY3 4
    punpckh%1 %4, %2, %3
    punpckl%1 %2, %3
%endmacro
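; variant of x86util's SBUTTERFLY that takes explicit operands instead of
; register numbers and writes the high interleave to %4 while %2 keeps the
; low halves, which leaves %3 free to be a memory operand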
; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8
; out: 6 rows of 8 in [%9+0*16] .. [%9+5*16]
%macro TRANSPOSE6x8_MEM 9
    RESET_MM_PERMUTATION
    movq  m0, %1
    movq  m1, %2
    movq  m2, %3
    movq  m3, %4
    movq  m4, %5
    movq  m5, %6
    movq  m6, %7
    SBUTTERFLY bw, 0, 1, 7
    SBUTTERFLY bw, 2, 3, 7
    SBUTTERFLY bw, 4, 5, 7
    movq  [%9+0x10], m3
    SBUTTERFLY3 bw, m6, %8, m7
    SBUTTERFLY wd, 0, 2, 3
    SBUTTERFLY wd, 4, 6, 3
    punpckhdq m0, m4
    movq  [%9+0x00], m0
    SBUTTERFLY3 wd, m1, [%9+0x10], m3
    SBUTTERFLY wd, 5, 7, 0
    SBUTTERFLY dq, 1, 5, 0
    SBUTTERFLY dq, 2, 6, 0
    punpckldq m3, m7
    movq  [%9+0x10], m2
    movq  [%9+0x20], m6
    movq  [%9+0x30], m1
    movq  [%9+0x40], m5
    movq  [%9+0x50], m3
    RESET_MM_PERMUTATION
%endmacro

; in: 8 rows of 8 in %1..%8
; out: 8 rows of 8 in %9..%16
%macro TRANSPOSE8x8_MEM 16
    RESET_MM_PERMUTATION
    movq  m0, %1
    movq  m1, %2
    movq  m2, %3
    movq  m3, %4
    movq  m4, %5
    movq  m5, %6
    movq  m6, %7
    SBUTTERFLY bw, 0, 1, 7
    SBUTTERFLY bw, 2, 3, 7
    SBUTTERFLY bw, 4, 5, 7
    SBUTTERFLY3 bw, m6, %8, m7
    movq  %9,  m5
    SBUTTERFLY wd, 0, 2, 5
    SBUTTERFLY wd, 4, 6, 5
    SBUTTERFLY wd, 1, 3, 5
    movq  %11, m6
    movq  m6,  %9
    SBUTTERFLY wd, 6, 7, 5
    SBUTTERFLY dq, 0, 4, 5
    SBUTTERFLY dq, 1, 6, 5
    movq  %9,  m0
    movq  %10, m4
    movq  %13, m1
    movq  %14, m6
    SBUTTERFLY3 dq, m2, %11, m0
    SBUTTERFLY dq, 3, 7, 4
    movq  %11, m2
    movq  %12, m0
    movq  %15, m3
    movq  %16, m7
    RESET_MM_PERMUTATION
%endmacro
; out: %4 = |%1-%2|>%3
; clobbers: %5
%macro DIFF_GT 5
%if avx_enabled == 0
    mova    %5, %2
    mova    %4, %1
    psubusb %5, %1
    psubusb %4, %2
%else
    psubusb %5, %2, %1
    psubusb %4, %1, %2
%endif
    por     %4, %5
    psubusb %4, %3
%endmacro
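; psubusb saturates at zero, so max(0,a-b) | max(0,b-a) == |a-b|; the final
; psubusb then leaves a nonzero byte exactly where |a-b| > %3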
; out: %4 = 0xff where |%1-%2| <= %3 (i.e. 0 where |%1-%2| > %3);
; note the inverted polarity relative to DIFF_GT
; clobbers: %5
%macro DIFF_GT2 5
%if ARCH_X86_64
    psubusb %5, %2, %1
    psubusb %4, %1, %2
%else
    mova    %5, %2
    mova    %4, %1
    psubusb %5, %1
    psubusb %4, %2
%endif
    psubusb %5, %3
    psubusb %4, %3
    pcmpeqb %4, %5
%endmacro
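; after the threshold subtraction at least one operand is zero, so pcmpeqb
; yields 0xff only when both saturated differences hit zero; e.g. a=35,
; b=20, %3=10: %4 = max(0,15-10) = 5, %5 = 0 -> pcmpeqb gives 0x00
; (|a-b| > 10, so the byte is not selected)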
; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha-1 %2=beta-1
; out: m5=beta-1, m7=mask, %3=alpha-1
; clobbers: m4,m6
%macro LOAD_MASK 2-3
    movd     m4, %1
    movd     m5, %2
    SPLATW   m4, m4
    SPLATW   m5, m5
    packuswb m4, m4 ; 16x alpha-1
    packuswb m5, m5 ; 16x beta-1
%if %0>2
    mova     %3, m4
%endif
    DIFF_GT  m1, m2, m4, m7, m6 ; |p0-q0| > alpha-1
    DIFF_GT  m0, m1, m5, m4, m6 ; |p1-p0| > beta-1
    por      m7, m4
    DIFF_GT  m3, m2, m5, m4, m6 ; |q1-q0| > beta-1
    por      m7, m4
    pxor     m6, m6
    pcmpeqb  m7, m6
%endmacro
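; the or-accumulated "greater" bytes are compared against zero, so m7 ends
; up 0xff exactly where |p0-q0| <= alpha-1, |p1-p0| <= beta-1 and
; |q1-q0| <= beta-1 all hold, i.e. where the H.264 edge filter is enabled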
; in: m0=p1 m1=p0 m2=q0 m3=q1 m7=(tc&mask)
; out: m1=p0' m2=q0'
; clobbers: m0,3-6
%macro DEBLOCK_P0_Q0 0
    pcmpeqb m4, m4
    pxor    m5, m1, m2 ; p0^q0
    pxor    m3, m4
    pand    m5, [pb_1] ; (p0^q0)&1
    pavgb   m3, m0     ; (p1 - q1 + 256)>>1
    pxor    m4, m1
    pavgb   m3, [pb_3] ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2
    pavgb   m4, m2     ; (q0 - p0 + 256)>>1
    pavgb   m3, m5
    mova    m6, [pb_A1]
    paddusb m3, m4     ; d+128+33
    psubusb m6, m3
    psubusb m3, [pb_A1]
    pminub  m6, m7
    pminub  m3, m7
    psubusb m1, m6
    psubusb m2, m3
    paddusb m1, m3
    paddusb m2, m6
%endmacro
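; pavgb computes (a+b+1)>>1, so m3 accumulates 128+33+delta with
; delta = ((q0-p0)*4 + (p1-q1) + 4) >> 3, the H.264 normal-filter delta;
; pb_A1 (0xA1 = 161 = 128+33) splits that into its positive and negative
; parts, pminub clips each to tc, and the saturating add/sub pairs apply
; +delta to p0 and -delta to q0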
; in: m1=p0 m2=q0
;     %1=p1 %2=q2 %3=[q2] %4=[q1] %5=tc0 %6=tmp
; out: [q1] = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
; clobbers: q2, tmp, tc0
%macro LUMA_Q1 6
    pavgb   %6, m1, m2
    pavgb   %2, %6     ; avg(p2,avg(p0,q0))
    pxor    %6, %3
    pand    %6, [pb_1] ; (p2^avg(p0,q0))&1
    psubusb %2, %6     ; (p2+((p0+q0+1)>>1))>>1
    psubusb %6, %1, %5
    paddusb %5, %1
    pmaxub  %2, %6
    pminub  %2, %5
    mova    %4, %2
%endmacro
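; pavgb rounds up, and the pxor/pand against pb_1 recovers the dropped
; carry, so %2 becomes the floor average (q2+((p0+q0+1)>>1))>>1;
; pmaxub/pminub against the saturated q1-tc0 and q1+tc0 perform the clip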
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_v_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
%macro DEBLOCK_LUMA 1
cglobal deblock_v_luma_8_%1, 5,5,10
    movd    m8, [r4]      ; tc0
    lea     r4, [r1*3]
    dec     r2d           ; alpha-1
    neg     r4
    dec     r3d           ; beta-1
    add     r4, r0        ; pix-3*stride

    mova    m0, [r4+r1]   ; p1
    mova    m1, [r4+2*r1] ; p0
    mova    m2, [r0]      ; q0
    mova    m3, [r0+r1]   ; q1
    LOAD_MASK r2d, r3d

    punpcklbw m8, m8
    punpcklbw m8, m8      ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
    pcmpeqb m9, m9
    pcmpeqb m9, m8
    pandn   m9, m7
    pand    m8, m9

    movdqa  m3, [r4]      ; p2
    DIFF_GT2 m1, m3, m5, m6, m7 ; m6 = 0xff where |p2-p0| <= beta-1
    pand    m6, m9
    psubb   m7, m8, m6
    pand    m6, m8
    LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4

    movdqa  m4, [r0+2*r1] ; q2
    DIFF_GT2 m2, m4, m5, m6, m3 ; m6 = 0xff where |q2-q0| <= beta-1
    pand    m6, m9
    pand    m8, m6
    psubb   m7, m6
    mova    m3, [r0+r1]
    LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m8, m6

    DEBLOCK_P0_Q0
    mova    [r4+2*r1], m1
    mova    [r0], m2
    RET

;-----------------------------------------------------------------------------
; void deblock_h_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
INIT_MMX
cglobal deblock_h_luma_8_%1, 5,9
    movsxd r7,  r1d
    lea    r8,  [r7+r7*2]
    lea    r6,  [r0-4]
    lea    r5,  [r0-4+r8]
%if WIN64
    sub    rsp, 0x98
    %define pix_tmp rsp+0x30
%else
    sub    rsp, 0x68
    %define pix_tmp rsp
%endif

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r7, r8), pix_tmp
    lea    r6, [r6+r7*8]
    lea    r5, [r5+r7*8]
    TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r7, r8), pix_tmp+8

    ; vertical filter
    ; alpha, beta, tc0 are still in r2d, r3d, r4
    ; don't backup r6, r5, r7, r8 because deblock_v_luma_sse2 doesn't use them
    lea    r0, [pix_tmp+0x30]
    mov    r1d, 0x10
%if WIN64
    mov    [rsp+0x20], r4
%endif
    call   deblock_v_luma_8_%1

    ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
    add    r6, 2
    add    r5, 2
    movq   m0, [pix_tmp+0x18]
    movq   m1, [pix_tmp+0x28]
    movq   m2, [pix_tmp+0x38]
    movq   m3, [pix_tmp+0x48]
    TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)

    shl    r7, 3
    sub    r6, r7
    sub    r5, r7
    shr    r7, 3
    movq   m0, [pix_tmp+0x10]
    movq   m1, [pix_tmp+0x20]
    movq   m2, [pix_tmp+0x30]
    movq   m3, [pix_tmp+0x40]
    TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)

%if WIN64
    add    rsp, 0x98
%else
    add    rsp, 0x68
%endif
    RET
%endmacro

INIT_XMM
DEBLOCK_LUMA sse2
INIT_AVX
DEBLOCK_LUMA avx
%else

%macro DEBLOCK_LUMA 3
;-----------------------------------------------------------------------------
; void deblock_v8_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_%2_luma_8_%1, 5,5
    lea     r4, [r1*3]
    dec     r2            ; alpha-1
    neg     r4
    dec     r3            ; beta-1
    add     r4, r0        ; pix-3*stride
    %assign pad 2*%3+12-(stack_offset&15)
    SUB     esp, pad

    mova    m0, [r4+r1]   ; p1
    mova    m1, [r4+2*r1] ; p0
    mova    m2, [r0]      ; q0
    mova    m3, [r0+r1]   ; q1
    LOAD_MASK r2, r3

    mov     r3, r4mp
    pcmpeqb m3, m3
    movd    m4, [r3]      ; tc0
    punpcklbw m4, m4
    punpcklbw m4, m4      ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
    mova    [esp+%3], m4  ; tc
    pcmpgtb m4, m3
    mova    m3, [r4]      ; p2
    pand    m4, m7
    mova    [esp], m4     ; mask
    DIFF_GT2 m1, m3, m5, m6, m7 ; m6 = 0xff where |p2-p0| <= beta-1
    pand    m6, m4
    pand    m4, [esp+%3]  ; tc
    psubb   m7, m4, m6
    pand    m6, m4
    LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4

    mova    m4, [r0+2*r1] ; q2
    DIFF_GT2 m2, m4, m5, m6, m3 ; m6 = 0xff where |q2-q0| <= beta-1
    pand    m6, [esp]     ; mask
    mova    m5, [esp+%3]  ; tc
    psubb   m7, m6
    pand    m5, m6
    mova    m3, [r0+r1]
    LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m5, m6

    DEBLOCK_P0_Q0
    mova    [r4+2*r1], m1
    mova    [r0], m2
    ADD     esp, pad
    RET

;-----------------------------------------------------------------------------
; void deblock_h_luma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
INIT_MMX
cglobal deblock_h_luma_8_%1, 0,5
    mov    r0, r0mp
    mov    r3, r1m
    lea    r4, [r3*3]
    sub    r0, 4
    lea    r1, [r0+r4]
    %assign pad 0x78-(stack_offset&15)
    SUB    esp, pad
%define pix_tmp esp+12

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp
    lea    r0, [r0+r3*8]
    lea    r1, [r1+r3*8]
    TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp+8

    ; vertical filter
    lea    r0, [pix_tmp+0x30]
    PUSH   dword r4m
    PUSH   dword r3m
    PUSH   dword r2m
    PUSH   dword 16
    PUSH   dword r0
    call   deblock_%2_luma_8_%1
%ifidn %2, v8
    add    dword [esp   ], 8 ; pix_tmp+0x38
    add    dword [esp+16], 2 ; tc0+2
    call   deblock_%2_luma_8_%1
%endif
    ADD    esp, 20

    ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
    mov    r0, r0mp
    sub    r0, 2
    movq   m0, [pix_tmp+0x10]
    movq   m1, [pix_tmp+0x20]
    lea    r1, [r0+r4]
    movq   m2, [pix_tmp+0x30]
    movq   m3, [pix_tmp+0x40]
    TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)

    lea    r0, [r0+r3*8]
    lea    r1, [r1+r3*8]
    movq   m0, [pix_tmp+0x18]
    movq   m1, [pix_tmp+0x28]
    movq   m2, [pix_tmp+0x38]
    movq   m3, [pix_tmp+0x48]
    TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)

    ADD    esp, pad
    RET
%endmacro ; DEBLOCK_LUMA

INIT_MMX
DEBLOCK_LUMA mmxext, v8, 8
INIT_XMM
DEBLOCK_LUMA sse2, v, 16
INIT_AVX
DEBLOCK_LUMA avx, v, 16

%endif ; ARCH
%macro LUMA_INTRA_P012 4 ; p0..p3 in memory
%if ARCH_X86_64
    pavgb   t0, p2, p1
    pavgb   t1, p0, q0
%else
    mova    t0, p2
    mova    t1, p0
    pavgb   t0, p1
    pavgb   t1, q0
%endif
    pavgb   t0, t1 ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
    mova    t5, t1
%if ARCH_X86_64
    paddb   t2, p2, p1
    paddb   t3, p0, q0
%else
    mova    t2, p2
    mova    t3, p0
    paddb   t2, p1
    paddb   t3, q0
%endif
    paddb   t2, t3
    mova    t3, t2
    mova    t4, t2
    psrlw   t2, 1
    pavgb   t2, mpb_0
    pxor    t2, t0
    pand    t2, mpb_1
    psubb   t0, t2 ; p1' = (p2+p1+p0+q0+2)/4
%if ARCH_X86_64
    pavgb   t1, p2, q1
    psubb   t2, p2, q1
%else
    mova    t1, p2
    mova    t2, p2
    pavgb   t1, q1
    psubb   t2, q1
%endif
    paddb   t3, t3
    psubb   t3, t2 ; p2+2*p1+2*p0+2*q0+q1
    pand    t2, mpb_1
    psubb   t1, t2
    pavgb   t1, p1
    pavgb   t1, t5 ; (((p2+q1)/2 + p1+1)/2 + (p0+q0+1)/2 + 1)/2
    psrlw   t3, 2
    pavgb   t3, mpb_0
    pxor    t3, t1
    pand    t3, mpb_1
    psubb   t1, t3 ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8

    pxor    t3, p0, q1
    pavgb   t2, p0, q1
    pand    t3, mpb_1
    psubb   t2, t3
    pavgb   t2, p1 ; p0'b = (2*p1+p0+q1+2)/4
    pxor    t1, t2
    pxor    t2, p0
    pand    t1, mask1p
    pand    t2, mask0
    pxor    t1, t2
    pxor    t1, p0
    mova    %1, t1 ; store p0

    mova    t1, %4 ; p3
    paddb   t2, t1, p2
    pavgb   t1, p2
    pavgb   t1, t0 ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4
    paddb   t2, t2
    paddb   t2, t4 ; 2*p3+3*p2+p1+p0+q0
    psrlw   t2, 2
    pavgb   t2, mpb_0
    pxor    t2, t1
    pand    t2, mpb_1
    psubb   t1, t2 ; p2' = (2*p3+3*p2+p1+p0+q0+4)/8

    pxor    t0, p1
    pxor    t1, p2
    pand    t0, mask1p
    pand    t1, mask1p
    pxor    t0, p1
    pxor    t1, p2
    mova    %2, t0 ; store p1
    mova    %3, t1 ; store p2
%endmacro
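; the xor/and/xor sequences are branchless selects: x ^ ((x^y) & mask)
; picks y where mask is 0xff and keeps x elsewhere, so each output blends
; the strong-filter value (under mask1p), the 4-tap p0'b (under mask0) and
; the unfiltered pixel; the equations produced are the H.264 strong intra
; filter:
;   p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;   p1' = (p2 + p1 + p0 + q0 + 2) >> 2
;   p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3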
%macro LUMA_INTRA_SWAP_PQ 0
    %define q1 m0
    %define q0 m1
    %define p0 m2
    %define p1 m3
    %define p2 q2
    %define mask1p mask1q
%endmacro

%macro DEBLOCK_LUMA_INTRA 2
    %define p1 m0
    %define p0 m1
    %define q0 m2
    %define q1 m3
    %define t0 m4
    %define t1 m5
    %define t2 m6
    %define t3 m7
%if ARCH_X86_64
    %define p2 m8
    %define q2 m9
    %define t4 m10
    %define t5 m11
    %define mask0 m12
    %define mask1p m13
    %define mask1q [rsp-24]
    %define mpb_0 m14
    %define mpb_1 m15
%else
    %define spill(x) [esp+16*x+((stack_offset+4)&15)]
    %define p2 [r4+r1]
    %define q2 [r0+2*r1]
    %define t4 spill(0)
    %define t5 spill(1)
    %define mask0 spill(2)
    %define mask1p spill(3)
    %define mask1q spill(4)
    %define mpb_0 [pb_0]
    %define mpb_1 [pb_1]
%endif

;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_%2_luma_intra_8_%1, 4,6,16
%if ARCH_X86_64 == 0
    sub     esp, 0x60
%endif
    lea     r4, [r1*4]
    lea     r5, [r1*3] ; 3*stride
    dec     r2d        ; alpha-1
    jl .end
    neg     r4
    dec     r3d        ; beta-1
    jl .end
    add     r4, r0     ; pix-4*stride
    mova    p1, [r4+2*r1]
    mova    p0, [r4+r5]
    mova    q0, [r0]
    mova    q1, [r0+r1]
%if ARCH_X86_64
    pxor    mpb_0, mpb_0
    mova    mpb_1, [pb_1]
    LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
    SWAP    7, 12          ; m12=mask0
    pavgb   t5, mpb_0
    pavgb   t5, mpb_1      ; alpha/4+1
    movdqa  p2, [r4+r1]
    movdqa  q2, [r0+2*r1]
    DIFF_GT2 p0, q0, t5, t0, t3 ; t0 = 0xff where |p0-q0| <= alpha/4+1
    DIFF_GT2 p0, p2, m5, t2, t5 ; mask1 = 0xff where |p2-p0| <= beta-1
    DIFF_GT2 q0, q2, m5, t4, t5 ; t4 = 0xff where |q2-q0| <= beta-1
    pand    t0, mask0
    pand    t4, t0
    pand    t2, t0
    mova    mask1q, t4
    mova    mask1p, t2
%else
    LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
    mova    m4, t5
    mova    mask0, m7
    pavgb   m4, [pb_0]
    pavgb   m4, [pb_1]     ; alpha/4+1
    DIFF_GT2 p0, q0, m4, m6, m7 ; m6 = 0xff where |p0-q0| <= alpha/4+1
    pand    m6, mask0
    DIFF_GT2 p0, p2, m5, m4, m7 ; m4 = 0xff where |p2-p0| <= beta-1
    pand    m4, m6
    mova    mask1p, m4
    DIFF_GT2 q0, q2, m5, m4, m7 ; m4 = 0xff where |q2-q0| <= beta-1
    pand    m4, m6
    mova    mask1q, m4
%endif
    LUMA_INTRA_P012 [r4+r5], [r4+2*r1], [r4+r1], [r4]
    LUMA_INTRA_SWAP_PQ
    LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5]
.end:
%if ARCH_X86_64 == 0
    add     esp, 0x60
%endif
    RET

INIT_MMX
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_8_%1, 4,9
    movsxd r7,  r1d
    lea    r8,  [r7*3]
    lea    r6,  [r0-4]
    lea    r5,  [r0-4+r8]
    sub    rsp, 0x88
    %define pix_tmp rsp

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    lea    r6, [r6+r7*8]
    lea    r5, [r5+r7*8]
    TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea    r0, [pix_tmp+0x40]
    mov    r1, 0x10
    call   deblock_v_luma_intra_8_%1

    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    lea    r5, [r6+r8]
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
    shl    r7, 3
    sub    r6, r7
    sub    r5, r7
    shr    r7, 3
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
    add    rsp, 0x88
    RET
%else
cglobal deblock_h_luma_intra_8_%1, 2,4
    lea    r3, [r1*3]
    sub    r0, 4
    lea    r2, [r0+r3]
    %assign pad 0x8c-(stack_offset&15)
    SUB    rsp, pad
    %define pix_tmp rsp

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    lea    r0, [r0+r1*8]
    lea    r2, [r2+r1*8]
    TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea    r0, [pix_tmp+0x40]
    PUSH   dword r3m
    PUSH   dword r2m
    PUSH   dword 16
    PUSH   r0
    call   deblock_%2_luma_intra_8_%1
%ifidn %2, v8
    add    dword [rsp], 8 ; pix_tmp+8
    call   deblock_%2_luma_intra_8_%1
%endif
    ADD    esp, 16

    mov    r1, r1m
    mov    r0, r0mp
    lea    r3, [r1*3]
    sub    r0, 4
    lea    r2, [r0+r3]

    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
    lea    r0, [r0+r1*8]
    lea    r2, [r2+r1*8]
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
    ADD    rsp, pad
    RET
%endif ; ARCH_X86_64
%endmacro ; DEBLOCK_LUMA_INTRA

INIT_XMM
DEBLOCK_LUMA_INTRA sse2, v
INIT_AVX
DEBLOCK_LUMA_INTRA avx , v
%if ARCH_X86_64 == 0
INIT_MMX
DEBLOCK_LUMA_INTRA mmxext, v8
%endif
INIT_MMX

%macro CHROMA_V_START 0
    dec     r2d ; alpha-1
    dec     r3d ; beta-1
    mov     t5, r0
    sub     t5, r1
    sub     t5, r1
%endmacro
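; t5 = pix - 2*stride, so [t5]=p1, [t5+r1]=p0, [r0]=q0 and [r0+r1]=q1
; (the chroma filter only reads two pixels on each side of the edge)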
%macro CHROMA_H_START 0
    dec     r2d
    dec     r3d
    sub     r0, 2
    lea     t6, [r1*3]
    mov     t5, r0
    add     r0, t6
%endmacro

%define t5 r5
%define t6 r6

;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_8_mmxext, 5,6
    CHROMA_V_START
    movq    m0, [t5]
    movq    m1, [t5+r1]
    movq    m2, [r0]
    movq    m3, [r0+r1]
    call    ff_chroma_inter_body_mmxext
    movq    [t5+r1], m1
    movq    [r0], m2
    RET

;-----------------------------------------------------------------------------
; void ff_deblock_h_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_h_chroma_8_mmxext, 5,7
%if UNIX64
    %define buf0 [rsp-24]
    %define buf1 [rsp-16]
%elif WIN64
    sub     rsp, 16
    %define buf0 [rsp]
    %define buf1 [rsp+8]
%else
    %define buf0 r0m
    %define buf1 r2m
%endif
    CHROMA_H_START
    TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    movq    buf0, m0
    movq    buf1, m3
    call    ff_chroma_inter_body_mmxext
    movq    m0, buf0
    movq    m3, buf1
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
%if WIN64
    add     rsp, 16
%endif
    RET

ALIGN 16
ff_chroma_inter_body_mmxext:
    LOAD_MASK r2d, r3d
    movd    m6, [r4] ; tc0
    punpcklbw m6, m6
    pand    m7, m6
    DEBLOCK_P0_Q0
    ret

; in: %1=p0 %2=p1 %3=q1
; out: p0 = (p0 + q1 + 2*p1 + 2) >> 2
%macro CHROMA_INTRA_P0 3
    movq    m4, %1
    pxor    m4, %3
    pand    m4, [pb_1] ; m4 = (p0^q1)&1
    pavgb   %1, %3
    psubusb %1, m4
    pavgb   %1, %2     ; dst = avg(p1, avg(p0,q1) - ((p0^q1)&1))
%endmacro
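; pavgb(a,b) = (a+b+1)>>1 rounds up; subtracting (a^b)&1 turns it into the
; floor average, so the final pavgb yields exactly (2*p1 + p0 + q1 + 2) >> 2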
%define t5 r4
%define t6 r5

;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_8_mmxext, 4,5
    CHROMA_V_START
    movq    m0, [t5]
    movq    m1, [t5+r1]
    movq    m2, [r0]
    movq    m3, [r0+r1]
    call    ff_chroma_intra_body_mmxext
    movq    [t5+r1], m1
    movq    [r0], m2
    RET

;-----------------------------------------------------------------------------
; void ff_deblock_h_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_chroma_intra_8_mmxext, 4,6
    CHROMA_H_START
    TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    call    ff_chroma_intra_body_mmxext
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
    RET

ALIGN 16
ff_chroma_intra_body_mmxext:
    LOAD_MASK r2d, r3d
    movq    m5, m1
    movq    m6, m2
    CHROMA_INTRA_P0 m1, m0, m3
    CHROMA_INTRA_P0 m2, m3, m0
    psubb   m1, m5
    psubb   m2, m6
    pand    m1, m7
    pand    m2, m7
    paddb   m1, m5
    paddb   m2, m6
    ret