;*****************************************************************************
;* MMX/SSE2-optimized H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2008 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"
%include "x86util.asm"

SECTION_RODATA

cextern pb_0
cextern pb_1
cextern pb_3
cextern pb_A1

SECTION .text

; expands to [base],...,[base+7*stride]
%define PASS8ROWS(base, base3, stride, stride3) \
    [base], [base+stride], [base+stride*2], [base3], \
    [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]
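; (with base3 = base+3*stride and stride3 = 3*stride, the eight operands
; address rows 0..7 without ever needing a multiply by 5, 6 or 7)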
; in: 8 rows of 4 bytes in %1..%8
; out: 4 rows of 8 bytes in m0..m3
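; (standard SIMD transpose ladder: interleave bytes, then words, then
; dwords, so byte column i of the inputs becomes output row i)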
%macro TRANSPOSE4x8_LOAD 8
    movd       m0, %1
    movd       m2, %2
    movd       m1, %3
    movd       m3, %4
    punpcklbw  m0, m2
    punpcklbw  m1, m3
    movq       m2, m0
    punpcklwd  m0, m1
    punpckhwd  m2, m1

    movd       m4, %5
    movd       m6, %6
    movd       m5, %7
    movd       m7, %8
    punpcklbw  m4, m6
    punpcklbw  m5, m7
    movq       m6, m4
    punpcklwd  m4, m5
    punpckhwd  m6, m5

    movq       m1, m0
    movq       m3, m2
    punpckldq  m0, m4
    punpckhdq  m1, m4
    punpckldq  m2, m6
    punpckhdq  m3, m6
%endmacro

; in: 4 rows of 8 bytes in m0..m3
; out: 8 rows of 4 bytes in %1..%8
%macro TRANSPOSE8x4_STORE 8
    movq       m4, m0
    movq       m5, m1
    movq       m6, m2
    punpckhdq  m4, m4
    punpckhdq  m5, m5
    punpckhdq  m6, m6

    punpcklbw  m0, m1
    punpcklbw  m2, m3
    movq       m1, m0
    punpcklwd  m0, m2
    punpckhwd  m1, m2
    movd       %1, m0
    punpckhdq  m0, m0
    movd       %2, m0
    movd       %3, m1
    punpckhdq  m1, m1
    movd       %4, m1

    punpckhdq  m3, m3
    punpcklbw  m4, m5
    punpcklbw  m6, m3
    movq       m5, m4
    punpcklwd  m4, m6
    punpckhwd  m5, m6
    movd       %5, m4
    punpckhdq  m4, m4
    movd       %6, m4
    movd       %7, m5
    punpckhdq  m5, m5
    movd       %8, m5
%endmacro
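; %2 = low-half interleave of (%2,%3), %4 = high-half interleave
; (%4 receives a copy of the original %2 first)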
%macro SBUTTERFLY3 4
    movq       %4, %2
    punpckl%1  %2, %3
    punpckh%1  %4, %3
%endmacro

; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8
; out: 6 rows of 8 in [%9+0*16] .. [%9+5*16]
%macro TRANSPOSE6x8_MEM 9
    movq       m0, %1
    movq       m1, %2
    movq       m2, %3
    movq       m3, %4
    movq       m4, %5
    movq       m5, %6
    movq       m6, %7
    SBUTTERFLY3 bw, m0, m1, m7
    SBUTTERFLY3 bw, m2, m3, m1
    SBUTTERFLY3 bw, m4, m5, m3
    movq       [%9+0x10], m1
    SBUTTERFLY3 bw, m6, %8, m5
    SBUTTERFLY3 wd, m0, m2, m1
    SBUTTERFLY3 wd, m4, m6, m2
    punpckhdq  m0, m4
    movq       [%9+0x00], m0
    SBUTTERFLY3 wd, m7, [%9+0x10], m6
    SBUTTERFLY3 wd, m3, m5, m4
    SBUTTERFLY3 dq, m7, m3, m0
    SBUTTERFLY3 dq, m1, m2, m5
    punpckldq  m6, m4
    movq       [%9+0x10], m1
    movq       [%9+0x20], m5
    movq       [%9+0x30], m7
    movq       [%9+0x40], m0
    movq       [%9+0x50], m6
%endmacro

; in: 8 rows of 8 in %1..%8
; out: 8 rows of 8 in %9..%16
%macro TRANSPOSE8x8_MEM 16
    movq       m0, %1
    movq       m1, %2
    movq       m2, %3
    movq       m3, %4
    movq       m4, %5
    movq       m5, %6
    movq       m6, %7
    SBUTTERFLY3 bw, m0, m1, m7
    SBUTTERFLY3 bw, m2, m3, m1
    SBUTTERFLY3 bw, m4, m5, m3
    SBUTTERFLY3 bw, m6, %8, m5
    movq       %9,  m3
    SBUTTERFLY3 wd, m0, m2, m3
    SBUTTERFLY3 wd, m4, m6, m2
    SBUTTERFLY3 wd, m7, m1, m6
    movq       %11, m2
    movq       m2,  %9
    SBUTTERFLY3 wd, m2, m5, m1
    SBUTTERFLY3 dq, m0, m4, m5
    SBUTTERFLY3 dq, m7, m2, m4
    movq       %9,  m0
    movq       %10, m5
    movq       %13, m7
    movq       %14, m4
    SBUTTERFLY3 dq, m3, %11, m0
    SBUTTERFLY3 dq, m6, m1, m5
    movq       %11, m3
    movq       %12, m0
    movq       %15, m6
    movq       %16, m5
%endmacro
; out: %4 = sat(|%1-%2| - %3): nonzero bytes where |%1-%2| > %3 (not a 0/0xFF mask)
; clobbers: %5
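; (psubusb saturates at zero, so or-ing the two one-sided saturated
; differences yields |%1-%2| with no risk of overflow)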
%macro DIFF_GT 5
    mova       %5, %2
    mova       %4, %1
    psubusb    %5, %1
    psubusb    %4, %2
    por        %4, %5
    psubusb    %4, %3
%endmacro
; out: %4 = 0xFF where |%1-%2| <= %3 (the complement of |%1-%2| > %3)
; clobbers: %5
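; (one of the two one-sided saturated differences is always zero; after
; subtracting %3 they compare equal under pcmpeqb only when both are
; zero, i.e. when |%1-%2| <= %3)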
%macro DIFF_GT2 5
    mova       %5, %2
    mova       %4, %1
    psubusb    %5, %1
    psubusb    %4, %2
    psubusb    %5, %3
    psubusb    %4, %3
    pcmpeqb    %4, %5
%endmacro
%macro SPLATW 1
%ifidn m0, xmm0
    pshuflw    %1, %1, 0
    punpcklqdq %1, %1
%else
    pshufw     %1, %1, 0
%endif
%endmacro
; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha-1 %2=beta-1
; out: m5=beta-1, m7=mask, %3=alpha-1
; clobbers: m4,m6
%macro LOAD_MASK 2-3
    movd       m4, %1
    movd       m5, %2
    SPLATW     m4
    SPLATW     m5
    packuswb   m4, m4  ; 16x alpha-1
    packuswb   m5, m5  ; 16x beta-1
%if %0>2
    mova       %3, m4
%endif
    DIFF_GT    m1, m2, m4, m7, m6 ; |p0-q0| > alpha-1
    DIFF_GT    m0, m1, m5, m4, m6 ; |p1-p0| > beta-1
    por        m7, m4
    DIFF_GT    m3, m2, m5, m4, m6 ; |q1-q0| > beta-1
    por        m7, m4
    pxor       m6, m6
    pcmpeqb    m7, m6  ; m7 = 0xFF where all three diffs are within their limits
%endmacro
; in: m0=p1 m1=p0 m2=q0 m3=q1 m7=(tc&mask)
; out: m1=p0' m2=q0'
; clobbers: m0,3-6
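; the delta is computed biased by 128+33 (pb_A1 = 0xA1); the two psubusb
; against pb_A1 split it into its positive and negative parts, which are
; clamped to tc and applied to p0/q0 with saturating adds/subs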
%macro DEBLOCK_P0_Q0 0
    mova       m5, m1
    pxor       m5, m2     ; p0^q0
    pand       m5, [pb_1] ; (p0^q0)&1
    pcmpeqb    m4, m4
    pxor       m3, m4
    pavgb      m3, m0     ; (p1 - q1 + 256)>>1
    pavgb      m3, [pb_3] ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2
    pxor       m4, m1
    pavgb      m4, m2     ; (q0 - p0 + 256)>>1
    pavgb      m3, m5
    paddusb    m3, m4     ; d+128+33
    mova       m6, [pb_A1]
    psubusb    m6, m3
    psubusb    m3, [pb_A1]
    pminub     m6, m7
    pminub     m3, m7
    psubusb    m1, m6
    psubusb    m2, m3
    paddusb    m1, m3
    paddusb    m2, m6
%endmacro
; in: m1=p0 m2=q0
;     %1=p1 %2=q2 %3=[q2] %4=[q1] %5=tc0 %6=tmp
; out: [q1] = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
; clobbers: q2, tmp, tc0
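; (the pb_1 parity correction makes the second pavgb truncate instead of
; round up, giving exactly (q2 + ((p0+q0+1)>>1)) >> 1 before clipping)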
%macro LUMA_Q1 6
    mova       %6, m1
    pavgb      %6, m2
    pavgb      %2, %6     ; avg(p2,avg(p0,q0))
    pxor       %6, %3
    pand       %6, [pb_1] ; (p2^avg(p0,q0))&1
    psubusb    %2, %6     ; (p2+((p0+q0+1)>>1))>>1
    mova       %6, %1
    psubusb    %6, %5
    paddusb    %5, %1
    pmaxub     %2, %6
    pminub     %2, %5
    mova       %4, %2
%endmacro
%ifdef ARCH_X86_64
;-----------------------------------------------------------------------------
; void x264_deblock_v_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
INIT_XMM
cglobal x264_deblock_v_luma_sse2, 5,5,10
    movd       m8, [r4] ; tc0
    lea        r4, [r1*3]
    dec        r2d      ; alpha-1
    neg        r4
    dec        r3d      ; beta-1
    add        r4, r0   ; pix-3*stride

    mova       m0, [r4+r1]   ; p1
    mova       m1, [r4+2*r1] ; p0
    mova       m2, [r0]      ; q0
    mova       m3, [r0+r1]   ; q1
    LOAD_MASK  r2d, r3d

    punpcklbw  m8, m8
    punpcklbw  m8, m8 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
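    ; tc0 == -1 (0xFF after broadcast) means "leave this edge alone":
    ; m9 = mask & ~(tc == -1), then tc itself is masked with it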
    pcmpeqb    m9, m9
    pcmpeqb    m9, m8
    pandn      m9, m7
    pand       m8, m9

    movdqa     m3, [r4] ; p2
    DIFF_GT2   m1, m3, m5, m6, m7 ; m6 = 0xFF where |p2-p0| <= beta-1
    pand       m6, m9
    mova       m7, m8
    psubb      m7, m6   ; tc++ where p1 is filtered
    pand       m6, m8
    LUMA_Q1    m0, m3, [r4], [r4+r1], m6, m4

    movdqa     m4, [r0+2*r1] ; q2
    DIFF_GT2   m2, m4, m5, m6, m3 ; m6 = 0xFF where |q2-q0| <= beta-1
    pand       m6, m9
    pand       m8, m6
    psubb      m7, m6   ; tc++ where q1 is filtered
    mova       m3, [r0+r1]
    LUMA_Q1    m3, m4, [r0+2*r1], [r0+r1], m8, m6

    DEBLOCK_P0_Q0
    mova       [r4+2*r1], m1
    mova       [r0], m2
    RET
;-----------------------------------------------------------------------------
; void x264_deblock_h_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
INIT_MMX
cglobal x264_deblock_h_luma_sse2, 5,7
    movsxd     r10, r1d
    lea        r11, [r10+r10*2]
    lea        r6,  [r0-4]
    lea        r5,  [r0-4+r11]
%ifdef WIN64
    sub        rsp, 0x98
    %define pix_tmp rsp+0x30
%else
    sub        rsp, 0x68
    %define pix_tmp rsp
%endif

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r10, r11), pix_tmp
    lea        r6, [r6+r10*8]
    lea        r5, [r5+r10*8]
    TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r10, r11), pix_tmp+8

    ; vertical filter
    ; alpha, beta, tc0 are still in r2d, r3d, r4
    ; don't backup r6, r5, r10, r11 because x264_deblock_v_luma_sse2 doesn't use them
    lea        r0, [pix_tmp+0x30]
    mov        r1d, 0x10
%ifdef WIN64
    mov        [rsp+0x20], r4
%endif
    call       x264_deblock_v_luma_sse2

    ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
    add        r6, 2
    add        r5, 2
    movq       m0, [pix_tmp+0x18]
    movq       m1, [pix_tmp+0x28]
    movq       m2, [pix_tmp+0x38]
    movq       m3, [pix_tmp+0x48]
    TRANSPOSE8x4_STORE PASS8ROWS(r6, r5, r10, r11)

    shl        r10, 3
    sub        r6,  r10
    sub        r5,  r10
    shr        r10, 3
    movq       m0, [pix_tmp+0x10]
    movq       m1, [pix_tmp+0x20]
    movq       m2, [pix_tmp+0x30]
    movq       m3, [pix_tmp+0x40]
    TRANSPOSE8x4_STORE PASS8ROWS(r6, r5, r10, r11)

%ifdef WIN64
    add        rsp, 0x98
%else
    add        rsp, 0x68
%endif
    RET
%else

%macro DEBLOCK_LUMA 3
;-----------------------------------------------------------------------------
; void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal x264_deblock_%2_luma_%1, 5,5
    lea        r4, [r1*3]
    dec        r2     ; alpha-1
    neg        r4
    dec        r3     ; beta-1
    add        r4, r0 ; pix-3*stride
%assign pad 2*%3+12-(stack_offset&15)
    SUB        esp, pad

    mova       m0, [r4+r1]   ; p1
    mova       m1, [r4+2*r1] ; p0
    mova       m2, [r0]      ; q0
    mova       m3, [r0+r1]   ; q1
    LOAD_MASK  r2, r3

    mov        r3, r4mp
    movd       m4, [r3] ; tc0
    punpcklbw  m4, m4
    punpcklbw  m4, m4 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
    mova       [esp+%3], m4 ; tc
    pcmpeqb    m3, m3
    pcmpgtb    m4, m3  ; 0xFF where tc0 >= 0
    pand       m4, m7
    mova       [esp], m4 ; mask
    mova       m3, [r4] ; p2
    DIFF_GT2   m1, m3, m5, m6, m7 ; m6 = 0xFF where |p2-p0| <= beta-1
    pand       m6, m4
    pand       m4, [esp+%3] ; tc
    mova       m7, m4
    psubb      m7, m6  ; tc++ where p1 is filtered
    pand       m6, m4
    LUMA_Q1    m0, m3, [r4], [r4+r1], m6, m4

    mova       m4, [r0+2*r1] ; q2
    DIFF_GT2   m2, m4, m5, m6, m3 ; m6 = 0xFF where |q2-q0| <= beta-1
    mova       m5, [esp] ; mask
    pand       m6, m5
    mova       m5, [esp+%3] ; tc
    pand       m5, m6
    psubb      m7, m6  ; tc++ where q1 is filtered
    mova       m3, [r0+r1]
    LUMA_Q1    m3, m4, [r0+2*r1], [r0+r1], m5, m6

    DEBLOCK_P0_Q0
    mova       [r4+2*r1], m1
    mova       [r0], m2
    ADD        esp, pad
    RET
;-----------------------------------------------------------------------------
; void x264_deblock_h_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
INIT_MMX
cglobal x264_deblock_h_luma_%1, 0,5
    mov        r0, r0mp
    mov        r3, r1m
    lea        r4, [r3*3]
    sub        r0, 4
    lea        r1, [r0+r4]
%assign pad 0x78-(stack_offset&15)
    SUB        esp, pad
%define pix_tmp esp+12

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp
    lea        r0, [r0+r3*8]
    lea        r1, [r1+r3*8]
    TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp+8

    ; vertical filter
    lea        r0, [pix_tmp+0x30]
    PUSH       dword r4m
    PUSH       dword r3m
    PUSH       dword r2m
    PUSH       dword 16
    PUSH       dword r0
    call       x264_deblock_%2_luma_%1
%ifidn %2, v8
    add        dword [esp   ], 8 ; pix_tmp+0x38
    add        dword [esp+16], 2 ; tc0+2
    call       x264_deblock_%2_luma_%1
%endif
    ADD        esp, 20

    ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
    mov        r0, r0mp
    sub        r0, 2
    lea        r1, [r0+r4]

    movq       m0, [pix_tmp+0x10]
    movq       m1, [pix_tmp+0x20]
    movq       m2, [pix_tmp+0x30]
    movq       m3, [pix_tmp+0x40]
    TRANSPOSE8x4_STORE PASS8ROWS(r0, r1, r3, r4)

    lea        r0, [r0+r3*8]
    lea        r1, [r1+r3*8]
    movq       m0, [pix_tmp+0x18]
    movq       m1, [pix_tmp+0x28]
    movq       m2, [pix_tmp+0x38]
    movq       m3, [pix_tmp+0x48]
    TRANSPOSE8x4_STORE PASS8ROWS(r0, r1, r3, r4)

    ADD        esp, pad
    RET
%endmacro ; DEBLOCK_LUMA

INIT_MMX
DEBLOCK_LUMA mmxext, v8, 8
INIT_XMM
DEBLOCK_LUMA sse2, v, 16

%endif ; ARCH
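; strong (bS=4) intra luma filter for one side of the edge: computes the
; strong p0'/p1'/p2' values plus the weak fallback p0'b, then does a
; branchless per-pixel select with pxor/pand/pxor bit-blends:
;   result = p0 ^ (mask0 & (p0'b^p0)) ^ (mask1p & (p0'a^p0'b))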
%macro LUMA_INTRA_P012 4 ; p0..p3 in memory
    mova       t0, p2
    mova       t1, p0
    pavgb      t0, p1
    pavgb      t1, q0
    pavgb      t0, t1 ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
    mova       t5, t1
    mova       t2, p2
    mova       t3, p0
    paddb      t2, p1
    paddb      t3, q0
    paddb      t2, t3
    mova       t3, t2
    mova       t4, t2
    psrlw      t2, 1
    pavgb      t2, mpb_0
    pxor       t2, t0
    pand       t2, mpb_1
    psubb      t0, t2 ; p1' = (p2+p1+p0+q0+2)/4

    mova       t1, p2
    mova       t2, p2
    pavgb      t1, q1
    psubb      t2, q1
    paddb      t3, t3
    psubb      t3, t2 ; p2+2*p1+2*p0+2*q0+q1
    pand       t2, mpb_1
    psubb      t1, t2
    pavgb      t1, p1
    pavgb      t1, t5 ; (((p2+q1)/2 + p1+1)/2 + (p0+q0+1)/2 + 1)/2
    psrlw      t3, 2
    pavgb      t3, mpb_0
    pxor       t3, t1
    pand       t3, mpb_1
    psubb      t1, t3 ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8

    mova       t3, p0
    mova       t2, p0
    pxor       t3, q1
    pavgb      t2, q1
    pand       t3, mpb_1
    psubb      t2, t3
    pavgb      t2, p1 ; p0'b = (2*p1+p0+q1+2)/4

    pxor       t1, t2
    pxor       t2, p0
    pand       t1, mask1p
    pand       t2, mask0
    pxor       t1, t2
    pxor       t1, p0
    mova       %1, t1 ; store p0

    mova       t1, %4 ; p3
    mova       t2, t1
    pavgb      t1, p2
    paddb      t2, p2
    pavgb      t1, t0 ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4
    paddb      t2, t2
    paddb      t2, t4 ; 2*p3+3*p2+p1+p0+q0
    psrlw      t2, 2
    pavgb      t2, mpb_0
    pxor       t2, t1
    pand       t2, mpb_1
    psubb      t1, t2 ; p2' = (2*p3+3*p2+p1+p0+q0+4)/8

    pxor       t0, p1
    pxor       t1, p2
    pand       t0, mask1p
    pand       t1, mask1p
    pxor       t0, p1
    pxor       t1, p2
    mova       %2, t0 ; store p1
    mova       %3, t1 ; store p2
%endmacro
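; redefine the p/q names so LUMA_INTRA_P012 can be reused verbatim for
; the q side of the edge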
%macro LUMA_INTRA_SWAP_PQ 0
%define q1 m0
%define q0 m1
%define p0 m2
%define p1 m3
%define p2 q2
%define mask1p mask1q
%endmacro

%macro DEBLOCK_LUMA_INTRA 2
%define p1 m0
%define p0 m1
%define q0 m2
%define q1 m3
%define t0 m4
%define t1 m5
%define t2 m6
%define t3 m7
%ifdef ARCH_X86_64
%define p2 m8
%define q2 m9
%define t4 m10
%define t5 m11
%define mask0 m12
%define mask1p m13
%define mask1q [rsp-24]
%define mpb_0 m14
%define mpb_1 m15
%else
%define spill(x) [esp+16*x+((stack_offset+4)&15)]
%define p2 [r4+r1]
%define q2 [r0+2*r1]
%define t4 spill(0)
%define t5 spill(1)
%define mask0 spill(2)
%define mask1p spill(3)
%define mask1q spill(4)
%define mpb_0 [pb_0]
%define mpb_1 [pb_1]
%endif
;-----------------------------------------------------------------------------
; void x264_deblock_v_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal x264_deblock_%2_luma_intra_%1, 4,6,16
%ifndef ARCH_X86_64
    sub        esp, 0x60
%endif
    lea        r4, [r1*4]
    lea        r5, [r1*3] ; 3*stride
    dec        r2d        ; alpha-1
    jl .end
    neg        r4
    dec        r3d        ; beta-1
    jl .end
    add        r4, r0     ; pix-4*stride
    mova       p1, [r4+2*r1]
    mova       p0, [r4+r5]
    mova       q0, [r0]
    mova       q1, [r0+r1]
%ifdef ARCH_X86_64
    pxor       mpb_0, mpb_0
    mova       mpb_1, [pb_1]
    LOAD_MASK  r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
    SWAP       7, 12        ; m12=mask0
    pavgb      t5, mpb_0
    pavgb      t5, mpb_1    ; alpha/4+1
    movdqa     p2, [r4+r1]
    movdqa     q2, [r0+2*r1]
    DIFF_GT2   p0, q0, t5, t0, t3 ; t0 = 0xFF where |p0-q0| < alpha/4+2
    DIFF_GT2   p0, p2, m5, t2, t5 ; mask1p: 0xFF where |p2-p0| <= beta-1
    DIFF_GT2   q0, q2, m5, t4, t5 ; mask1q: 0xFF where |q2-q0| <= beta-1
    pand       t0, mask0
    pand       t4, t0
    pand       t2, t0
    mova       mask1q, t4
    mova       mask1p, t2
%else
    LOAD_MASK  r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
    mova       m4, t5
    mova       mask0, m7
    pavgb      m4, [pb_0]
    pavgb      m4, [pb_1] ; alpha/4+1
    DIFF_GT2   p0, q0, m4, m6, m7 ; m6 = 0xFF where |p0-q0| < alpha/4+2
    pand       m6, mask0
    DIFF_GT2   p0, p2, m5, m4, m7 ; m4 = 0xFF where |p2-p0| <= beta-1
    pand       m4, m6
    mova       mask1p, m4
    DIFF_GT2   q0, q2, m5, m4, m7 ; m4 = 0xFF where |q2-q0| <= beta-1
    pand       m4, m6
    mova       mask1q, m4
%endif
    LUMA_INTRA_P012 [r4+r5], [r4+2*r1], [r4+r1], [r4]
    LUMA_INTRA_SWAP_PQ
    LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5]
.end:
%ifndef ARCH_X86_64
    add        esp, 0x60
%endif
    RET
INIT_MMX
%ifdef ARCH_X86_64
;-----------------------------------------------------------------------------
; void x264_deblock_h_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal x264_deblock_h_luma_intra_%1, 4,7
    movsxd     r10, r1d
    lea        r11, [r10*3]
    lea        r6,  [r0-4]
    lea        r5,  [r0-4+r11]
    sub        rsp, 0x88
%define pix_tmp rsp

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r10, r11), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    lea        r6, [r6+r10*8]
    lea        r5, [r5+r10*8]
    TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r10, r11), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea        r0, [pix_tmp+0x40]
    mov        r1, 0x10
    call       x264_deblock_v_luma_intra_%1

    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    lea        r5, [r6+r11]
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r6, r5, r10, r11)
    shl        r10, 3
    sub        r6,  r10
    sub        r5,  r10
    shr        r10, 3
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r6, r5, r10, r11)
    add        rsp, 0x88
    RET
%else
cglobal x264_deblock_h_luma_intra_%1, 2,4
    lea        r3, [r1*3]
    sub        r0, 4
    lea        r2, [r0+r3]
%assign pad 0x8c-(stack_offset&15)
    SUB        rsp, pad
%define pix_tmp rsp

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    lea        r0, [r0+r1*8]
    lea        r2, [r2+r1*8]
    TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea        r0, [pix_tmp+0x40]
    PUSH       dword r3m
    PUSH       dword r2m
    PUSH       dword 16
    PUSH       r0
    call       x264_deblock_%2_luma_intra_%1
%ifidn %2, v8
    add        dword [rsp], 8 ; pix_tmp+8
    call       x264_deblock_%2_luma_intra_%1
%endif
    ADD        esp, 16

    mov        r1, r1m
    mov        r0, r0mp
    lea        r3, [r1*3]
    sub        r0, 4
    lea        r2, [r0+r3]
    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
    lea        r0, [r0+r1*8]
    lea        r2, [r2+r1*8]
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
    ADD        rsp, pad
    RET
%endif ; ARCH_X86_64
%endmacro ; DEBLOCK_LUMA_INTRA

INIT_XMM
DEBLOCK_LUMA_INTRA sse2, v
%ifndef ARCH_X86_64
INIT_MMX
DEBLOCK_LUMA_INTRA mmxext, v8
%endif

INIT_MMX
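; chroma deblocking touches only two pixels on each side of the edge:
; CHROMA_V_START points t5 at pix-2*stride, CHROMA_H_START at pix-2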
%macro CHROMA_V_START 0
    dec        r2d ; alpha-1
    dec        r3d ; beta-1
    mov        t5, r0
    sub        t5, r1
    sub        t5, r1
%endmacro

%macro CHROMA_H_START 0
    dec        r2d
    dec        r3d
    sub        r0, 2
    lea        t6, [r1*3]
    mov        t5, r0
    add        r0, t6
%endmacro

%define t5 r5
%define t6 r6
;-----------------------------------------------------------------------------
; void x264_deblock_v_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal x264_deblock_v_chroma_mmxext, 5,6
    CHROMA_V_START
    movq       m0, [t5]
    movq       m1, [t5+r1]
    movq       m2, [r0]
    movq       m3, [r0+r1]
    call       x264_chroma_inter_body_mmxext
    movq       [t5+r1], m1
    movq       [r0], m2
    RET

;-----------------------------------------------------------------------------
; void x264_deblock_h_chroma( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal x264_deblock_h_chroma_mmxext, 5,7
%ifdef ARCH_X86_64
%define buf0 [rsp-24]
%define buf1 [rsp-16]
%else
%define buf0 r0m
%define buf1 r2m
%endif
    CHROMA_H_START
    TRANSPOSE4x8_LOAD PASS8ROWS(t5, r0, r1, t6)
    movq       buf0, m0
    movq       buf1, m3
    call       x264_chroma_inter_body_mmxext
    movq       m0, buf0
    movq       m3, buf1
    TRANSPOSE8x4_STORE PASS8ROWS(t5, r0, r1, t6)
    RET

ALIGN 16
x264_chroma_inter_body_mmxext:
    LOAD_MASK  r2d, r3d
    movd       m6, [r4] ; tc0
    punpcklbw  m6, m6
    pand       m7, m6
    DEBLOCK_P0_Q0
    ret

; in: %1=p0 %2=p1 %3=q1
; out: p0 = (p0 + q1 + 2*p1 + 2) >> 2
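; (the (p0^q1)&1 term cancels pavgb's round-up, so the two averages
; compose into the exact 4-tap value above)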
%macro CHROMA_INTRA_P0 3
    movq       m4, %1
    pxor       m4, %3
    pand       m4, [pb_1] ; m4 = (p0^q1)&1
    pavgb      %1, %3
    psubusb    %1, m4
    pavgb      %1, %2     ; dst = avg(p1, avg(p0,q1) - ((p0^q1)&1))
%endmacro

%define t5 r4
%define t6 r5

;-----------------------------------------------------------------------------
; void x264_deblock_v_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal x264_deblock_v_chroma_intra_mmxext, 4,5
    CHROMA_V_START
    movq       m0, [t5]
    movq       m1, [t5+r1]
    movq       m2, [r0]
    movq       m3, [r0+r1]
    call       x264_chroma_intra_body_mmxext
    movq       [t5+r1], m1
    movq       [r0], m2
    RET

;-----------------------------------------------------------------------------
; void x264_deblock_h_chroma_intra( uint8_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal x264_deblock_h_chroma_intra_mmxext, 4,6
    CHROMA_H_START
    TRANSPOSE4x8_LOAD PASS8ROWS(t5, r0, r1, t6)
    call       x264_chroma_intra_body_mmxext
    TRANSPOSE8x4_STORE PASS8ROWS(t5, r0, r1, t6)
    RET

ALIGN 16
x264_chroma_intra_body_mmxext:
    LOAD_MASK  r2d, r3d
    movq       m5, m1
    movq       m6, m2
    CHROMA_INTRA_P0 m1, m0, m3
    CHROMA_INTRA_P0 m2, m3, m0
    psubb      m1, m5
    psubb      m2, m6
    pand       m1, m7
    pand       m2, m7
    paddb      m1, m5
    paddb      m2, m6
    ret