;*****************************************************************************
;* MMX/SSE2/AVX-optimized H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Fiona Glaser <fiona@x264.com>
;*          Oskar Arvidsson <oskar@irock.se>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pb_A1:  times 16 db 0xA1
pb_3_1: times 4 db 3, 1

SECTION .text

cextern pb_0
cextern pb_1
cextern pb_3

; expands to [base],...,[base+7*stride]
%define PASS8ROWS(base, base3, stride, stride3) \
    [base], [base+stride], [base+stride*2], [base3], \
    [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]

%define PASS8ROWS(base, base3, stride, stride3, offset) \
    PASS8ROWS(base+offset, base3+offset, stride, stride3)
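; Illustrative expansion (not part of the build): with base3 = base+3*stride
; and stride3 = 3*stride, PASS8ROWS(base, base3, stride, stride3) yields the
; eight row operands
;   [base+0*stride], [base+1*stride], ..., [base+7*stride]
; i.e. one memory operand per row of an 8-row block, which the transpose
; macros below consume directly.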
; in: 8 rows of 4 bytes in %4..%11
; out: 4 rows of 8 bytes in m0..m3
%macro TRANSPOSE4x8_LOAD 11
    movh       m0, %4
    movh       m2, %5
    movh       m1, %6
    movh       m3, %7
    punpckl%1  m0, m2
    punpckl%1  m1, m3
    mova       m2, m0
    punpckl%2  m0, m1
    punpckh%2  m2, m1
    movh       m4, %8
    movh       m6, %9
    movh       m5, %10
    movh       m7, %11
    punpckl%1  m4, m6
    punpckl%1  m5, m7
    mova       m6, m4
    punpckl%2  m4, m5
    punpckh%2  m6, m5
    punpckh%3  m1, m0, m4
    punpckh%3  m3, m2, m6
    punpckl%3  m0, m4
    punpckl%3  m2, m6
%endmacro

; in: 4 rows of 8 bytes in m0..m3
; out: 8 rows of 4 bytes in %1..%8
%macro TRANSPOSE8x4B_STORE 8
    punpckhdq  m4, m0, m0
    punpckhdq  m5, m1, m1
    punpckhdq  m6, m2, m2
    punpcklbw  m0, m1
    punpcklbw  m2, m3
    punpcklwd  m1, m0, m2
    punpckhwd  m0, m2
    movh       %1, m1
    punpckhdq  m1, m1
    movh       %2, m1
    movh       %3, m0
    punpckhdq  m0, m0
    movh       %4, m0
    punpckhdq  m3, m3
    punpcklbw  m4, m5
    punpcklbw  m6, m3
    punpcklwd  m5, m4, m6
    punpckhwd  m4, m6
    movh       %5, m5
    punpckhdq  m5, m5
    movh       %6, m5
    movh       %7, m4
    punpckhdq  m4, m4
    movh       %8, m4
%endmacro
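; Descriptive note (not an upstream comment): TRANSPOSE4x8_LOAD and
; TRANSPOSE8x4B_STORE are a matched pair used by the horizontal (vertical-edge)
; filters. They gather 8 rows of the 4 pixels straddling the edge into
; registers as columns, so the same arithmetic as the vertical filters can be
; reused, and then scatter the filtered columns back out as rows.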
%macro TRANSPOSE4x8B_LOAD 8
    TRANSPOSE4x8_LOAD bw, wd, dq, %1, %2, %3, %4, %5, %6, %7, %8
%endmacro

%macro SBUTTERFLY3 4
    punpckh%1  %4, %2, %3
    punpckl%1  %2, %3
%endmacro

; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8
; out: 6 rows of 8 in [%9+0*16] .. [%9+5*16]
%macro TRANSPOSE6x8_MEM 9
    RESET_MM_PERMUTATION
    movq       m0, %1
    movq       m1, %2
    movq       m2, %3
    movq       m3, %4
    movq       m4, %5
    movq       m5, %6
    movq       m6, %7
    SBUTTERFLY bw, 0, 1, 7
    SBUTTERFLY bw, 2, 3, 7
    SBUTTERFLY bw, 4, 5, 7
    movq       [%9+0x10], m3
    SBUTTERFLY3 bw, m6, %8, m7
    SBUTTERFLY wd, 0, 2, 3
    SBUTTERFLY wd, 4, 6, 3
    punpckhdq  m0, m4
    movq       [%9+0x00], m0
    SBUTTERFLY3 wd, m1, [%9+0x10], m3
    SBUTTERFLY wd, 5, 7, 0
    SBUTTERFLY dq, 1, 5, 0
    SBUTTERFLY dq, 2, 6, 0
    punpckldq  m3, m7
    movq       [%9+0x10], m2
    movq       [%9+0x20], m6
    movq       [%9+0x30], m1
    movq       [%9+0x40], m5
    movq       [%9+0x50], m3
    RESET_MM_PERMUTATION
%endmacro

; in: 8 rows of 8 in %1..%8
; out: 8 rows of 8 in %9..%16
%macro TRANSPOSE8x8_MEM 16
    RESET_MM_PERMUTATION
    movq       m0, %1
    movq       m1, %2
    movq       m2, %3
    movq       m3, %4
    movq       m4, %5
    movq       m5, %6
    movq       m6, %7
    SBUTTERFLY bw, 0, 1, 7
    SBUTTERFLY bw, 2, 3, 7
    SBUTTERFLY bw, 4, 5, 7
    SBUTTERFLY3 bw, m6, %8, m7
    movq       %9, m5
    SBUTTERFLY wd, 0, 2, 5
    SBUTTERFLY wd, 4, 6, 5
    SBUTTERFLY wd, 1, 3, 5
    movq       %11, m6
    movq       m6, %9
    SBUTTERFLY wd, 6, 7, 5
    SBUTTERFLY dq, 0, 4, 5
    SBUTTERFLY dq, 1, 6, 5
    movq       %9, m0
    movq       %10, m4
    movq       %13, m1
    movq       %14, m6
    SBUTTERFLY3 dq, m2, %11, m0
    SBUTTERFLY dq, 3, 7, 4
    movq       %11, m2
    movq       %12, m0
    movq       %15, m3
    movq       %16, m7
    RESET_MM_PERMUTATION
%endmacro

; out: %4 = |%1-%2|>%3
; clobbers: %5
%macro DIFF_GT 5
%if avx_enabled == 0
    mova       %5, %2
    mova       %4, %1
    psubusb    %5, %1
    psubusb    %4, %2
%else
    psubusb    %5, %2, %1
    psubusb    %4, %1, %2
%endif
    por        %4, %5
    psubusb    %4, %3
%endmacro
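; Scalar sketch of the trick above (per unsigned byte lane, illustrative only):
; with saturating subtraction sat(x) = max(x, 0),
;   d  = sat(%1-%2) | sat(%2-%1)   ; exactly |%1-%2|, since one term is 0
;   %4 = sat(d - %3)               ; nonzero iff |%1-%2| > %3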
; out: %4 = |%1-%2|>%3
; clobbers: %5
%macro DIFF_GT2 5
%if ARCH_X86_64
    psubusb    %5, %2, %1
    psubusb    %4, %1, %2
%else
    mova       %5, %2
    mova       %4, %1
    psubusb    %5, %1
    psubusb    %4, %2
%endif
    psubusb    %5, %3
    psubusb    %4, %3
    pcmpeqb    %4, %5
%endmacro

; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha-1 %2=beta-1
; out: m5=beta-1, m7=mask, %3=alpha-1
; clobbers: m4,m6
%macro LOAD_MASK 2-3
    movd       m4, %1
    movd       m5, %2
    SPLATW     m4, m4
    SPLATW     m5, m5
    packuswb   m4, m4              ; 16x alpha-1
    packuswb   m5, m5              ; 16x beta-1
%if %0>2
    mova       %3, m4
%endif
    DIFF_GT    m1, m2, m4, m7, m6  ; |p0-q0| > alpha-1
    DIFF_GT    m0, m1, m5, m4, m6  ; |p1-p0| > beta-1
    por        m7, m4
    DIFF_GT    m3, m2, m5, m4, m6  ; |q1-q0| > beta-1
    por        m7, m4
    pxor       m6, m6
    pcmpeqb    m7, m6
%endmacro
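; In scalar terms (per pixel position, illustrative only) the mask built above
; is
;   m7 = 0xFF  if |p0-q0| <= alpha-1 && |p1-p0| <= beta-1 && |q1-q0| <= beta-1
;   m7 = 0x00  otherwise
; i.e. the standard H.264 per-sample edge-filtering condition
; (|p0-q0| < alpha, |p1-p0| < beta, |q1-q0| < beta).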
; in: m0=p1 m1=p0 m2=q0 m3=q1 m7=(tc&mask)
; out: m1=p0' m2=q0'
; clobbers: m0,3-6
%macro DEBLOCK_P0_Q0 0
    pcmpeqb    m4, m4
    pxor       m5, m1, m2   ; p0^q0
    pxor       m3, m4
    pand       m5, [pb_1]   ; (p0^q0)&1
    pavgb      m3, m0       ; (p1 - q1 + 256)>>1
    pxor       m4, m1
    pavgb      m3, [pb_3]   ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2
    pavgb      m4, m2       ; (q0 - p0 + 256)>>1
    pavgb      m3, m5
    mova       m6, [pb_A1]
    paddusb    m3, m4       ; d+128+33
    psubusb    m6, m3
    psubusb    m3, [pb_A1]
    pminub     m6, m7
    pminub     m3, m7
    psubusb    m1, m6
    psubusb    m2, m3
    paddusb    m1, m3
    paddusb    m2, m6
%endmacro
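; Reference form of the update this computes (the H.264 "normal" p0/q0 filter,
; given here for orientation only; the code above reaches it with biased pavgb
; averages and saturating adds/subs instead of a signed clip):
;   delta = clip( ((q0-p0)*4 + (p1-q1) + 4) >> 3, -tc, tc )
;   p0'   = clip_uint8(p0 + delta)
;   q0'   = clip_uint8(q0 - delta)
; where tc arrives in m7, already ANDed with the edge mask from LOAD_MASK.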
; in: m1=p0 m2=q0
;     %1=p1 %2=q2 %3=[q2] %4=[q1] %5=tc0 %6=tmp
; out: [q1] = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
; clobbers: q2, tmp, tc0
%macro LUMA_Q1 6
    pavgb      %6, m1, m2
    pavgb      %2, %6       ; avg(p2,avg(p0,q0))
    pxor       %6, %3
    pand       %6, [pb_1]   ; (p2^avg(p0,q0))&1
    psubusb    %2, %6       ; (p2+((p0+q0+1)>>1))>>1
    psubusb    %6, %1, %5
    paddusb    %5, %1
    pmaxub     %2, %6
    pminub     %2, %5
    mova       %4, %2
%endmacro
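; Scalar equivalent (restating the out: comment above, illustrative only):
;   q1' = clip( (q2 + ((p0+q0+1)>>1)) >> 1, q1-tc0, q1+tc0 )
; The pxor/pand/psubusb sequence corrects the round-up of the outer pavgb so
; the inner term is floored as in the formula; pmaxub/pminub implement the
; +-tc0 clamp.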
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void ff_deblock_v_luma(uint8_t *pix, int stride, int alpha, int beta,
;                        int8_t *tc0)
;-----------------------------------------------------------------------------
%macro DEBLOCK_LUMA 0
cglobal deblock_v_luma_8, 5,5,10
    movsxdifnidn r1, r1d
    movd       m8, [r4]       ; tc0
    lea        r4, [r1*3]
    dec        r2d            ; alpha-1
    neg        r4
    dec        r3d            ; beta-1
    add        r4, r0         ; pix-3*stride
    mova       m0, [r4+r1]    ; p1
    mova       m1, [r4+2*r1]  ; p0
    mova       m2, [r0]       ; q0
    mova       m3, [r0+r1]    ; q1
    LOAD_MASK  r2d, r3d
    punpcklbw  m8, m8
    punpcklbw  m8, m8         ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
    pcmpeqb    m9, m9
    pcmpeqb    m9, m8
    pandn      m9, m7
    pand       m8, m9
    movdqa     m3, [r4]       ; p2
    DIFF_GT2   m1, m3, m5, m6, m7  ; |p2-p0| > beta-1
    pand       m6, m9
    psubb      m7, m8, m6
    pand       m6, m8
    LUMA_Q1    m0, m3, [r4], [r4+r1], m6, m4
    movdqa     m4, [r0+2*r1]  ; q2
    DIFF_GT2   m2, m4, m5, m6, m3  ; |q2-q0| > beta-1
    pand       m6, m9
    pand       m8, m6
    psubb      m7, m6
    mova       m3, [r0+r1]
    LUMA_Q1    m3, m4, [r0+2*r1], [r0+r1], m8, m6
    DEBLOCK_P0_Q0
    mova       [r4+2*r1], m1
    mova       [r0], m2
    RET

;-----------------------------------------------------------------------------
; void ff_deblock_h_luma(uint8_t *pix, int stride, int alpha, int beta,
;                        int8_t *tc0)
;-----------------------------------------------------------------------------
INIT_MMX cpuname
cglobal deblock_h_luma_8, 5,9,0,0x60+16*WIN64
    movsxd     r7, r1d
    movsxdifnidn r1, r1d
    lea        r8, [r7+r7*2]
    lea        r6, [r0-4]
    lea        r5, [r0-4+r8]
%if WIN64
    %define pix_tmp rsp+0x30  ; shadow space + r4
%else
    %define pix_tmp rsp
%endif

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r7, r8), pix_tmp
    lea        r6, [r6+r7*8]
    lea        r5, [r5+r7*8]
    TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r7, r8), pix_tmp+8

    ; vertical filter
    ; alpha, beta, tc0 are still in r2d, r3d, r4
    ; don't backup r6, r5, r7, r8 because deblock_v_luma_sse2 doesn't use them
    lea        r0, [pix_tmp+0x30]
    mov        r1d, 0x10
%if WIN64
    mov        [rsp+0x20], r4
%endif
    call       deblock_v_luma_8

    ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
    add        r6, 2
    add        r5, 2
    movq       m0, [pix_tmp+0x18]
    movq       m1, [pix_tmp+0x28]
    movq       m2, [pix_tmp+0x38]
    movq       m3, [pix_tmp+0x48]
    TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)

    shl        r7, 3
    sub        r6, r7
    sub        r5, r7
    shr        r7, 3
    movq       m0, [pix_tmp+0x10]
    movq       m1, [pix_tmp+0x20]
    movq       m2, [pix_tmp+0x30]
    movq       m3, [pix_tmp+0x40]
    TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)
    RET
%endmacro

INIT_XMM sse2
DEBLOCK_LUMA
INIT_XMM avx
DEBLOCK_LUMA

%else

%macro DEBLOCK_LUMA 2
;-----------------------------------------------------------------------------
; void ff_deblock_v8_luma(uint8_t *pix, int stride, int alpha, int beta,
;                         int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_%1_luma_8, 5,5,8,2*%2
    movsxdifnidn r1, r1d
    lea        r4, [r1*3]
    dec        r2             ; alpha-1
    neg        r4
    dec        r3             ; beta-1
    add        r4, r0         ; pix-3*stride
    mova       m0, [r4+r1]    ; p1
    mova       m1, [r4+2*r1]  ; p0
    mova       m2, [r0]       ; q0
    mova       m3, [r0+r1]    ; q1
    LOAD_MASK  r2, r3

    mov        r3, r4mp
    pcmpeqb    m3, m3
    movd       m4, [r3]       ; tc0
    punpcklbw  m4, m4
    punpcklbw  m4, m4         ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
    mova       [esp+%2], m4   ; tc
    pcmpgtb    m4, m3
    mova       m3, [r4]       ; p2
    pand       m4, m7
    mova       [esp], m4      ; mask

    DIFF_GT2   m1, m3, m5, m6, m7  ; |p2-p0| > beta-1
    pand       m6, m4
    pand       m4, [esp+%2]   ; tc
    psubb      m7, m4, m6
    pand       m6, m4
    LUMA_Q1    m0, m3, [r4], [r4+r1], m6, m4

    mova       m4, [r0+2*r1]  ; q2
    DIFF_GT2   m2, m4, m5, m6, m3  ; |q2-q0| > beta-1
    pand       m6, [esp]      ; mask
    mova       m5, [esp+%2]   ; tc
    psubb      m7, m6
    pand       m5, m6
    mova       m3, [r0+r1]
    LUMA_Q1    m3, m4, [r0+2*r1], [r0+r1], m5, m6

    DEBLOCK_P0_Q0
    mova       [r4+2*r1], m1
    mova       [r0], m2
    RET

;-----------------------------------------------------------------------------
; void ff_deblock_h_luma(uint8_t *pix, int stride, int alpha, int beta,
;                        int8_t *tc0)
;-----------------------------------------------------------------------------
INIT_MMX cpuname
cglobal deblock_h_luma_8, 0,5,8,0x60+12
    movsxdifnidn r1, r1d
    mov        r0, r0mp
    mov        r3, r1m
    lea        r4, [r3*3]
    sub        r0, 4
    lea        r1, [r0+r4]
%define pix_tmp esp+12

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp
    lea        r0, [r0+r3*8]
    lea        r1, [r1+r3*8]
    TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp+8

    ; vertical filter
    lea        r0, [pix_tmp+0x30]
    PUSH       dword r4m
    PUSH       dword r3m
    PUSH       dword r2m
    PUSH       dword 16
    PUSH       dword r0
    call       deblock_%1_luma_8
%ifidn %1, v8
    add        dword [esp   ], 8   ; pix_tmp+0x38
    add        dword [esp+16], 2   ; tc0+2
    call       deblock_%1_luma_8
%endif
    ADD        esp, 20

    ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
    mov        r0, r0mp
    sub        r0, 2
    movq       m0, [pix_tmp+0x10]
    movq       m1, [pix_tmp+0x20]
    lea        r1, [r0+r4]
    movq       m2, [pix_tmp+0x30]
    movq       m3, [pix_tmp+0x40]
    TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)

    lea        r0, [r0+r3*8]
    lea        r1, [r1+r3*8]
    movq       m0, [pix_tmp+0x18]
    movq       m1, [pix_tmp+0x28]
    movq       m2, [pix_tmp+0x38]
    movq       m3, [pix_tmp+0x48]
    TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)
    RET
%endmacro ; DEBLOCK_LUMA

INIT_MMX mmxext
DEBLOCK_LUMA v8, 8
INIT_XMM sse2
DEBLOCK_LUMA v, 16
INIT_XMM avx
DEBLOCK_LUMA v, 16

%endif ; ARCH
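; Implementation note (summary of the flow above, not an upstream comment):
; the horizontal luma filters do not re-implement the filter for vertical
; edges. They transpose the 16-row strip around the edge into a contiguous
; temporary buffer, call the corresponding deblock_v/v8_luma entry point on it
; with stride 16, and transpose the rows the filter may have modified back
; into the frame.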
%macro LUMA_INTRA_P012 4 ; p0..p3 in memory
%if ARCH_X86_64
    pavgb      t0, p2, p1
    pavgb      t1, p0, q0
%else
    mova       t0, p2
    mova       t1, p0
    pavgb      t0, p1
    pavgb      t1, q0
%endif
    pavgb      t0, t1        ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
    mova       t5, t1
%if ARCH_X86_64
    paddb      t2, p2, p1
    paddb      t3, p0, q0
%else
    mova       t2, p2
    mova       t3, p0
    paddb      t2, p1
    paddb      t3, q0
%endif
    paddb      t2, t3
    mova       t3, t2
    mova       t4, t2
    psrlw      t2, 1
    pavgb      t2, mpb_0
    pxor       t2, t0
    pand       t2, mpb_1
    psubb      t0, t2        ; p1' = (p2+p1+p0+q0+2)/4;
%if ARCH_X86_64
    pavgb      t1, p2, q1
    psubb      t2, p2, q1
%else
    mova       t1, p2
    mova       t2, p2
    pavgb      t1, q1
    psubb      t2, q1
%endif
    paddb      t3, t3
    psubb      t3, t2        ; p2+2*p1+2*p0+2*q0+q1
    pand       t2, mpb_1
    psubb      t1, t2
    pavgb      t1, p1
    pavgb      t1, t5        ; (((p2+q1)/2 + p1+1)/2 + (p0+q0+1)/2 + 1)/2
    psrlw      t3, 2
    pavgb      t3, mpb_0
    pxor       t3, t1
    pand       t3, mpb_1
    psubb      t1, t3        ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8
    pxor       t3, p0, q1
    pavgb      t2, p0, q1
    pand       t3, mpb_1
    psubb      t2, t3
    pavgb      t2, p1        ; p0'b = (2*p1+p0+q1+2)/4
    pxor       t1, t2
    pxor       t2, p0
    pand       t1, mask1p
    pand       t2, mask0
    pxor       t1, t2
    pxor       t1, p0
    mova       %1, t1        ; store p0
    mova       t1, %4        ; p3
    paddb      t2, t1, p2
    pavgb      t1, p2
    pavgb      t1, t0        ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4
    paddb      t2, t2
    paddb      t2, t4        ; 2*p3+3*p2+p1+p0+q0
    psrlw      t2, 2
    pavgb      t2, mpb_0
    pxor       t2, t1
    pand       t2, mpb_1
    psubb      t1, t2        ; p2' = (2*p3+3*p2+p1+p0+q0+4)/8
    pxor       t0, p1
    pxor       t1, p2
    pand       t0, mask1p
    pand       t1, mask1p
    pxor       t0, p1
    pxor       t1, p2
    mova       %2, t0        ; store p1
    mova       %3, t1        ; store p2
%endmacro
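; Summary of the values blended above (restating the inline comments; the
; p0'b/mask0 path is the fallback used when the strong-filter mask mask1p is
; not set but the edge is still filtered):
;   p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3   (strong, mask1p)
;   p0' = (2*p1 + p0 + q1 + 2) >> 2                 (fallback, mask0 only)
;   p1' = (p2 + p1 + p0 + q0 + 2) >> 2              (strong)
;   p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3     (strong)
; The same macro handles the q side after LUMA_INTRA_SWAP_PQ renames the
; registers.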
%macro LUMA_INTRA_SWAP_PQ 0
%define q1 m0
%define q0 m1
%define p0 m2
%define p1 m3
%define p2 q2
%define mask1p mask1q
%endmacro

%macro DEBLOCK_LUMA_INTRA 1
%define p1 m0
%define p0 m1
%define q0 m2
%define q1 m3
%define t0 m4
%define t1 m5
%define t2 m6
%define t3 m7
%if ARCH_X86_64
%define p2 m8
%define q2 m9
%define t4 m10
%define t5 m11
%define mask0 m12
%define mask1p m13
%if WIN64
%define mask1q [rsp]
%else
%define mask1q [rsp-24]
%endif
%define mpb_0 m14
%define mpb_1 m15
%else
%define spill(x) [esp+16*x]
%define p2 [r4+r1]
%define q2 [r0+2*r1]
%define t4 spill(0)
%define t5 spill(1)
%define mask0 spill(2)
%define mask1p spill(3)
%define mask1q spill(4)
%define mpb_0 [pb_0]
%define mpb_1 [pb_1]
%endif

;-----------------------------------------------------------------------------
; void ff_deblock_v_luma_intra(uint8_t *pix, int stride, int alpha, int beta)
;-----------------------------------------------------------------------------
%if WIN64
cglobal deblock_%1_luma_intra_8, 4,6,16,0x10
%else
cglobal deblock_%1_luma_intra_8, 4,6,16,ARCH_X86_64*0x50-0x50
%endif
    movsxdifnidn r1, r1d
    lea        r4, [r1*4]
    lea        r5, [r1*3]    ; 3*stride
    dec        r2d           ; alpha-1
    jl .end
    neg        r4
    dec        r3d           ; beta-1
    jl .end
    add        r4, r0        ; pix-4*stride
    mova       p1, [r4+2*r1]
    mova       p0, [r4+r5]
    mova       q0, [r0]
    mova       q1, [r0+r1]
%if ARCH_X86_64
    pxor       mpb_0, mpb_0
    mova       mpb_1, [pb_1]
    LOAD_MASK  r2d, r3d, t5  ; m5=beta-1, t5=alpha-1, m7=mask0
    SWAP        7, 12        ; m12=mask0
    pavgb      t5, mpb_0
    pavgb      t5, mpb_1     ; alpha/4+1
    movdqa     p2, [r4+r1]
    movdqa     q2, [r0+2*r1]
    DIFF_GT2   p0, q0, t5, t0, t3  ; t0 = |p0-q0| > alpha/4+1
    DIFF_GT2   p0, p2, m5, t2, t5  ; mask1 = |p2-p0| > beta-1
    DIFF_GT2   q0, q2, m5, t4, t5  ; t4 = |q2-q0| > beta-1
    pand       t0, mask0
    pand       t4, t0
    pand       t2, t0
    mova       mask1q, t4
    mova       mask1p, t2
%else
    LOAD_MASK  r2d, r3d, t5  ; m5=beta-1, t5=alpha-1, m7=mask0
    mova       m4, t5
    mova       mask0, m7
    pavgb      m4, [pb_0]
    pavgb      m4, [pb_1]    ; alpha/4+1
    DIFF_GT2   p0, q0, m4, m6, m7  ; m6 = |p0-q0| > alpha/4+1
    pand       m6, mask0
    DIFF_GT2   p0, p2, m5, m4, m7  ; m4 = |p2-p0| > beta-1
    pand       m4, m6
    mova       mask1p, m4
    DIFF_GT2   q0, q2, m5, m4, m7  ; m4 = |q2-q0| > beta-1
    pand       m4, m6
    mova       mask1q, m4
%endif
    LUMA_INTRA_P012 [r4+r5], [r4+2*r1], [r4+r1], [r4]
    LUMA_INTRA_SWAP_PQ
    LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5]
.end:
    RET

INIT_MMX cpuname
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void ff_deblock_h_luma_intra(uint8_t *pix, int stride, int alpha, int beta)
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_8, 4,9,0,0x80
    movsxd     r7, r1d
    movsxdifnidn r1, r1d
    lea        r8, [r7*3]
    lea        r6, [r0-4]
    lea        r5, [r0-4+r8]
%if WIN64
    %define pix_tmp rsp+0x20 ; shadow space
%else
    %define pix_tmp rsp
%endif

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    lea        r6, [r6+r7*8]
    lea        r5, [r5+r7*8]
    TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea        r0, [pix_tmp+0x40]
    mov        r1, 0x10
    call       deblock_v_luma_intra_8

    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    lea        r5, [r6+r8]
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
    shl        r7, 3
    sub        r6, r7
    sub        r5, r7
    shr        r7, 3
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
    RET
%else
cglobal deblock_h_luma_intra_8, 2,4,8,0x80
    lea        r3, [r1*3]
    sub        r0, 4
    lea        r2, [r0+r3]
%define pix_tmp rsp

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    lea        r0, [r0+r1*8]
    lea        r2, [r2+r1*8]
    TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea        r0, [pix_tmp+0x40]
    PUSH       dword r3m
    PUSH       dword r2m
    PUSH       dword 16
    PUSH       r0
    call       deblock_%1_luma_intra_8
%ifidn %1, v8
    add        dword [rsp], 8  ; pix_tmp+8
    call       deblock_%1_luma_intra_8
%endif
    ADD        esp, 16

    mov        r1, r1m
    mov        r0, r0mp
    lea        r3, [r1*3]
    sub        r0, 4
    lea        r2, [r0+r3]
    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
    lea        r0, [r0+r1*8]
    lea        r2, [r2+r1*8]
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
    RET
%endif ; ARCH_X86_64
%endmacro ; DEBLOCK_LUMA_INTRA

INIT_XMM sse2
DEBLOCK_LUMA_INTRA v
INIT_XMM avx
DEBLOCK_LUMA_INTRA v
%if ARCH_X86_64 == 0
INIT_MMX mmxext
DEBLOCK_LUMA_INTRA v8
%endif

INIT_MMX mmxext

%macro CHROMA_V_START 0
    movsxdifnidn r1, r1d
    dec        r2d      ; alpha-1
    dec        r3d      ; beta-1
    mov        t5, r0
    sub        t5, r1
    sub        t5, r1
%endmacro

%macro CHROMA_H_START 0
    movsxdifnidn r1, r1d
    dec        r2d
    dec        r3d
    sub        r0, 2
    lea        t6, [r1*3]
    mov        t5, r0
    add        r0, t6
%endmacro

%define t5 r5
%define t6 r6

;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma(uint8_t *pix, int stride, int alpha, int beta,
;                          int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_8, 5,6
    CHROMA_V_START
    movq       m0, [t5]
    movq       m1, [t5+r1]
    movq       m2, [r0]
    movq       m3, [r0+r1]
    call       ff_chroma_inter_body_mmxext
    movq       [t5+r1], m1
    movq       [r0], m2
    RET

;-----------------------------------------------------------------------------
; void ff_deblock_h_chroma(uint8_t *pix, int stride, int alpha, int beta,
;                          int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_h_chroma_8, 5,7
%if ARCH_X86_64
    ; This could use the red zone on 64 bit unix to avoid the stack pointer
    ; readjustment, but valgrind assumes the red zone is clobbered on
    ; function calls and returns.
    sub        rsp, 16
    %define buf0 [rsp]
    %define buf1 [rsp+8]
%else
    %define buf0 r0m
    %define buf1 r2m
%endif
    CHROMA_H_START
    TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    movq       buf0, m0
    movq       buf1, m3
    call       ff_chroma_inter_body_mmxext
    movq       m0, buf0
    movq       m3, buf1
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
%if ARCH_X86_64
    add        rsp, 16
%endif
    RET

ALIGN 16
ff_chroma_inter_body_mmxext:
    LOAD_MASK  r2d, r3d
    movd       m6, [r4]  ; tc0
    punpcklbw  m6, m6
    pand       m7, m6
    DEBLOCK_P0_Q0
    ret

; in: %1=p0 %2=p1 %3=q1
; out: p0 = (p0 + q1 + 2*p1 + 2) >> 2
%macro CHROMA_INTRA_P0 3
    movq       m4, %1
    pxor       m4, %3
    pand       m4, [pb_1]  ; m4 = (p0^q1)&1
    pavgb      %1, %3
    psubusb    %1, m4
    pavgb      %1, %2      ; dst = avg(p1, avg(p0,q1) - ((p0^q1)&1))
%endmacro
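; Worked form of the trick above (per byte lane, illustrative only): pavgb
; rounds up, so avg(p0,q1) = (p0+q1+1)>>1; subtracting (p0^q1)&1 turns that
; into floor((p0+q1)/2). The second pavgb then gives
;   avg(p1, (p0+q1)>>1) = (2*p1 + p0 + q1 + 2) >> 2
; which matches the target value in the out: comment.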
%define t5 r4
%define t6 r5

;------------------------------------------------------------------------------
; void ff_deblock_v_chroma_intra(uint8_t *pix, int stride, int alpha, int beta)
;------------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_8, 4,5
    CHROMA_V_START
    movq       m0, [t5]
    movq       m1, [t5+r1]
    movq       m2, [r0]
    movq       m3, [r0+r1]
    call       ff_chroma_intra_body_mmxext
    movq       [t5+r1], m1
    movq       [r0], m2
    RET

;------------------------------------------------------------------------------
; void ff_deblock_h_chroma_intra(uint8_t *pix, int stride, int alpha, int beta)
;------------------------------------------------------------------------------
cglobal deblock_h_chroma_intra_8, 4,6
    CHROMA_H_START
    TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    call       ff_chroma_intra_body_mmxext
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
    RET

ALIGN 16
ff_chroma_intra_body_mmxext:
    LOAD_MASK  r2d, r3d
    movq       m5, m1
    movq       m6, m2
    CHROMA_INTRA_P0 m1, m0, m3
    CHROMA_INTRA_P0 m2, m3, m0
    psubb      m1, m5
    psubb      m2, m6
    pand       m1, m7
    pand       m2, m7
    paddb      m1, m5
    paddb      m2, m6
    ret

;-----------------------------------------------------------------------------
; void ff_h264_loop_filter_strength(int16_t bs[2][4][4], uint8_t nnz[40],
;                                   int8_t ref[2][40], int16_t mv[2][40][2],
;                                   int bidir, int edges, int step,
;                                   int mask_mv0, int mask_mv1, int field);
;
; bidir    is 0 or 1
; edges    is 1 or 4
; step     is 1 or 2
; mask_mv0 is 0 or 3
; mask_mv1 is 0 or 1
; field    is 0 or 1
;-----------------------------------------------------------------------------
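; Scalar reference of what each iteration computes per 4x4 edge position
; (paraphrased from the code below, for orientation only):
;   bS = 2   if nnz[b] || nnz[bn]
;   bS = 1   else if ref[b] != ref[bn], or any |mv[b]-mv[bn]| component reaches
;            the limit encoded in pb_3 / pb_3_1
;   bS = 0   otherwise
; mask_mv, edges and step select which edges are visited.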
%macro loop_filter_strength_iteration 7 ; edges, step, mask_mv,
                                        ; dir, d_idx, mask_dir, bidir
%define edgesd   %1
%define stepd    %2
%define mask_mvd %3
%define dir      %4
%define d_idx    %5
%define mask_dir %6
%define bidir    %7
    xor          b_idxd, b_idxd ; for (b_idx = 0; b_idx < edges; b_idx += step)
%%.b_idx_loop:
%if mask_dir == 0
    pxor         m0, m0
%endif
    test         b_idxd, dword mask_mvd
    jnz %%.skip_loop_iter                    ; if (!(b_idx & mask_mv))
%if bidir == 1
    movd         m2, [refq+b_idxq+d_idx+12]  ; { ref0[bn] }
    punpckldq    m2, [refq+b_idxq+d_idx+52]  ; { ref0[bn], ref1[bn] }
    pshufw       m0, [refq+b_idxq+12], 0x44  ; { ref0[b], ref0[b] }
    pshufw       m1, [refq+b_idxq+52], 0x44  ; { ref1[b], ref1[b] }
    pshufw       m3, m2, 0x4E                ; { ref1[bn], ref0[bn] }
    psubb        m0, m2                      ; { ref0[b] != ref0[bn],
                                             ;   ref0[b] != ref1[bn] }
    psubb        m1, m3                      ; { ref1[b] != ref1[bn],
                                             ;   ref1[b] != ref0[bn] }

    por          m0, m1
    mova         m1, [mvq+b_idxq*4+(d_idx+12)*4]
    mova         m2, [mvq+b_idxq*4+(d_idx+12)*4+mmsize]
    mova         m3, m1
    mova         m4, m2
    psubw        m1, [mvq+b_idxq*4+12*4]
    psubw        m2, [mvq+b_idxq*4+12*4+mmsize]
    psubw        m3, [mvq+b_idxq*4+52*4]
    psubw        m4, [mvq+b_idxq*4+52*4+mmsize]
    packsswb     m1, m2
    packsswb     m3, m4
    paddb        m1, m6
    paddb        m3, m6
    psubusb      m1, m5    ; abs(mv[b] - mv[bn]) >= limit
    psubusb      m3, m5
    packsswb     m1, m3
    por          m0, m1
    mova         m1, [mvq+b_idxq*4+(d_idx+52)*4]
    mova         m2, [mvq+b_idxq*4+(d_idx+52)*4+mmsize]
    mova         m3, m1
    mova         m4, m2
    psubw        m1, [mvq+b_idxq*4+12*4]
    psubw        m2, [mvq+b_idxq*4+12*4+mmsize]
    psubw        m3, [mvq+b_idxq*4+52*4]
    psubw        m4, [mvq+b_idxq*4+52*4+mmsize]
    packsswb     m1, m2
    packsswb     m3, m4
    paddb        m1, m6
    paddb        m3, m6
    psubusb      m1, m5    ; abs(mv[b] - mv[bn]) >= limit
    psubusb      m3, m5
    packsswb     m1, m3

    pshufw       m1, m1, 0x4E
    por          m0, m1
    pshufw       m1, m0, 0x4E
    pminub       m0, m1
%else ; bidir == 0
    movd         m0, [refq+b_idxq+12]
    psubb        m0, [refq+b_idxq+d_idx+12]  ; ref[b] != ref[bn]

    mova         m1, [mvq+b_idxq*4+12*4]
    mova         m2, [mvq+b_idxq*4+12*4+mmsize]
    psubw        m1, [mvq+b_idxq*4+(d_idx+12)*4]
    psubw        m2, [mvq+b_idxq*4+(d_idx+12)*4+mmsize]
    packsswb     m1, m2
    paddb        m1, m6
    psubusb      m1, m5    ; abs(mv[b] - mv[bn]) >= limit
    packsswb     m1, m1
    por          m0, m1
%endif ; bidir == 1/0
%%.skip_loop_iter:
    movd         m1, [nnzq+b_idxq+12]
    por          m1, [nnzq+b_idxq+d_idx+12]  ; nnz[b] || nnz[bn]

    pminub       m1, m7
    pminub       m0, m7
    psllw        m1, 1
    pxor         m2, m2
    pmaxub       m1, m0
    punpcklbw    m1, m2
    movq         [bsq+b_idxq+32*dir], m1

    add          b_idxd, dword stepd
    cmp          b_idxd, dword edgesd
    jl %%.b_idx_loop
%endmacro

INIT_MMX mmxext
cglobal h264_loop_filter_strength, 9, 9, 0, bs, nnz, ref, mv, bidir, edges, \
                                            step, mask_mv0, mask_mv1, field
%define b_idxq bidirq
%define b_idxd bidird
    cmp          dword fieldm, 0
    mova         m7, [pb_1]
    mova         m5, [pb_3]
    je .nofield
    mova         m5, [pb_3_1]
.nofield:
    mova         m6, m5
    paddb        m5, m5

    shl          dword stepd, 3
    shl          dword edgesd, 3
%if ARCH_X86_32
%define mask_mv0d mask_mv0m
%define mask_mv1d mask_mv1m
%endif
    shl          dword mask_mv1d, 3
    shl          dword mask_mv0d, 3
    cmp          dword bidird, 0
    jne .bidir
    loop_filter_strength_iteration edgesd, stepd, mask_mv1d, 1, -8,  0, 0
    loop_filter_strength_iteration     32,     8, mask_mv0d, 0, -1, -1, 0

    mova         m0, [bsq+mmsize*0]
    mova         m1, [bsq+mmsize*1]
    mova         m2, [bsq+mmsize*2]
    mova         m3, [bsq+mmsize*3]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    mova         [bsq+mmsize*0], m0
    mova         [bsq+mmsize*1], m1
    mova         [bsq+mmsize*2], m2
    mova         [bsq+mmsize*3], m3
    RET

.bidir:
    loop_filter_strength_iteration edgesd, stepd, mask_mv1d, 1, -8,  0, 1
    loop_filter_strength_iteration     32,     8, mask_mv0d, 0, -1, -1, 1

    mova         m0, [bsq+mmsize*0]
    mova         m1, [bsq+mmsize*1]
    mova         m2, [bsq+mmsize*2]
    mova         m3, [bsq+mmsize*3]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    mova         [bsq+mmsize*0], m0
    mova         [bsq+mmsize*1], m1
    mova         [bsq+mmsize*2], m2
    mova         [bsq+mmsize*3], m3
    RET