You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1109 lines
28KB

  1. ;*****************************************************************************
  2. ;* MMX/SSE2-optimized H.264 iDCT
  3. ;*****************************************************************************
  4. ;* Copyright (C) 2004-2005 Michael Niedermayer, Loren Merritt
  5. ;* Copyright (C) 2003-2008 x264 project
  6. ;*
  7. ;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
  8. ;* Loren Merritt <lorenm@u.washington.edu>
  9. ;* Holger Lubitz <hal@duncan.ol.sub.de>
;* Min Chen <chenm001@163.com>
  11. ;*
  12. ;* This file is part of Libav.
  13. ;*
  14. ;* Libav is free software; you can redistribute it and/or
  15. ;* modify it under the terms of the GNU Lesser General Public
  16. ;* License as published by the Free Software Foundation; either
  17. ;* version 2.1 of the License, or (at your option) any later version.
  18. ;*
  19. ;* Libav is distributed in the hope that it will be useful,
  20. ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
  21. ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  22. ;* Lesser General Public License for more details.
  23. ;*
  24. ;* You should have received a copy of the GNU Lesser General Public
  25. ;* License along with Libav; if not, write to the Free Software
  26. ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  27. ;*****************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; scan8 lookup table: maps the block index iterated by the add16/add8/
; add4 loops below to that block's offset inside the caller-supplied
; nnzc[6*8] array. Entries are encoded as x + y*8 within the 6x8 layout.
scan8_mem: db  4+ 1*8,  5+ 1*8,  4+ 2*8,  5+ 2*8
           db  6+ 1*8,  7+ 1*8,  6+ 2*8,  7+ 2*8
           db  4+ 3*8,  5+ 3*8,  4+ 4*8,  5+ 4*8
           db  6+ 3*8,  7+ 3*8,  6+ 4*8,  7+ 4*8
           db  4+ 6*8,  5+ 6*8,  4+ 7*8,  5+ 7*8
           db  6+ 6*8,  7+ 6*8,  6+ 7*8,  7+ 7*8
           db  4+ 8*8,  5+ 8*8,  4+ 9*8,  5+ 9*8
           db  6+ 8*8,  7+ 8*8,  6+ 9*8,  7+ 9*8
           db  4+11*8,  5+11*8,  4+12*8,  5+12*8
           db  6+11*8,  7+11*8,  6+12*8,  7+12*8
           db  4+13*8,  5+13*8,  4+14*8,  5+14*8
           db  6+13*8,  7+13*8,  6+14*8,  7+14*8

; In PIC builds the table must be addressed through a register
; (loaded with lea below), so reserve one extra GPR for it.
%ifdef PIC
%define npicregs 1
%define scan8 picregq
%else
%define npicregs 0
%define scan8 scan8_mem
%endif

cextern pw_32           ; packed words: eight times 32 (rounding bias)
cextern pw_1            ; packed words: eight times 1

SECTION .text
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
; 4x4 inverse transform of *block, result (rounded, >>6) added to dst
; with clipping; the coefficient block is zeroed on the way out.
%macro IDCT4_ADD 3
    ; Load dct coeffs (four rows of four words)
    movq         m0, [%2]
    movq         m1, [%2+8]
    movq         m2, [%2+16]
    movq         m3, [%2+24]

    IDCT4_1D      w, 0, 1, 2, 3, 4, 5     ; first 1-D pass
    mova         m6, [pw_32]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    paddw        m0, m6                    ; fold in +32 for the final >>6
    IDCT4_1D      w, 0, 1, 2, 3, 4, 5     ; second 1-D pass

    pxor         m7, m7
    movq    [%2+ 0], m7                    ; clear the block for the caller
    movq    [%2+ 8], m7
    movq    [%2+16], m7
    movq    [%2+24], m7

    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, %1, %3  ; rows 0-1: dst += res>>6
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, %1, %3  ; rows 2-3
%endmacro
INIT_MMX mmx
; void ff_h264_idct_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_add_8, 3, 3, 0
    movsxdifnidn r2, r2d        ; sign-extend the int stride on 64-bit ABIs
    IDCT4_ADD    r0, r1, r2
    RET
; %1=memory operand for row 0, %2=memory operand for row 4
; One 8-point 1-D iDCT pass. Rows 1,2,3,5,6,7 are expected in
; m1,m2,m3,m5,m6,m7 on entry; rows 0 and 4 are read from the two
; memory operands. Results leave in m0..m7 in natural row order.
%macro IDCT8_1D 2
    ; odd half: combine rows 1,3,5,7 (in m1,m3,m5,m7) into the four
    ; odd intermediate terms
    mova         m0, m1
    psraw        m1, 1
    mova         m4, m5
    psraw        m4, 1
    paddw        m4, m5
    paddw        m1, m0
    paddw        m4, m7
    paddw        m1, m5
    psubw        m4, m0
    paddw        m1, m3
    psubw        m0, m3
    psubw        m5, m3
    psraw        m3, 1
    paddw        m0, m7
    psubw        m5, m7
    psraw        m7, 1
    psubw        m0, m3
    psubw        m5, m7
    mova         m7, m1
    psraw        m1, 2
    mova         m3, m4
    psraw        m3, 2
    paddw        m3, m0
    psraw        m0, 2
    paddw        m1, m5
    psraw        m5, 2
    psubw        m0, m4
    psubw        m7, m5
    ; even half: rows 2 and 6 (m2, m6), then rows 0/4 from memory
    mova         m5, m6
    psraw        m6, 1
    mova         m4, m2
    psraw        m4, 1
    paddw        m6, m2
    psubw        m4, m5
    mova         m2, %1
    mova         m5, %2
    SUMSUB_BA     w, 5, 2
    ; butterfly the even and odd halves together
    SUMSUB_BA     w, 6, 5
    SUMSUB_BA     w, 4, 2
    SUMSUB_BA     w, 7, 6
    SUMSUB_BA     w, 0, 4
    SUMSUB_BA     w, 3, 2
    SUMSUB_BA     w, 1, 5
    SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
%endmacro
; %1=int16_t *block (rows 16 bytes apart)
; Load rows 1,2,3,5,6,7 of an 8x8 block into registers and run
; IDCT8_1D with rows 0 and 4 taken directly from memory.
%macro IDCT8_1D_FULL 1
    mova         m7, [%1+112]
    mova         m6, [%1+ 96]
    mova         m5, [%1+ 80]
    mova         m3, [%1+ 48]
    mova         m2, [%1+ 32]
    mova         m1, [%1+ 16]
    IDCT8_1D   [%1], [%1+ 64]
%endmacro
; %1=int16_t *block, %2=int16_t *dstblock
; First pass of the 8x8 transform for the MMX path: transform one
; 8x4 half and store it transposed (as two 4x4 quadrants) into %2.
%macro IDCT8_ADD_MMX_START 2
    IDCT8_1D_FULL %1
    mova       [%1], m7     ; spill m7: TRANSPOSE4x4W needs a scratch reg
    TRANSPOSE4x4W 0, 1, 2, 3, 7
    mova         m7, [%1]   ; reload row 7
    mova    [%2   ], m0
    mova    [%2+16], m1
    mova    [%2+32], m2
    mova    [%2+48], m3
    TRANSPOSE4x4W 4, 5, 6, 7, 3
    mova    [%2+ 8], m4
    mova    [%2+24], m5
    mova    [%2+40], m6
    mova    [%2+56], m7
%endmacro
; %1=uint8_t *dst, %2=int16_t *dstblock (transposed data), %3=int stride
; [, %4=int16_t *block: when given, the original 8x8 coefficient
;  block is zeroed]
; Second pass of the MMX 8x8 transform: transform the transposed data
; and add the rounded result (>>6) to 8 rows of dst.
%macro IDCT8_ADD_MMX_END 3-4
    IDCT8_1D_FULL %2
    mova    [%2   ], m5     ; spill m5..m7; STORE_DIFFx2 uses them as scratch
    mova    [%2+16], m6
    mova    [%2+32], m7
    pxor         m7, m7
%if %0 == 4
    ; clear the source coefficient block
    movq   [%4+  0], m7
    movq   [%4+  8], m7
    movq   [%4+ 16], m7
    movq   [%4+ 24], m7
    movq   [%4+ 32], m7
    movq   [%4+ 40], m7
    movq   [%4+ 48], m7
    movq   [%4+ 56], m7
    movq   [%4+ 64], m7
    movq   [%4+ 72], m7
    movq   [%4+ 80], m7
    movq   [%4+ 88], m7
    movq   [%4+ 96], m7
    movq   [%4+104], m7
    movq   [%4+112], m7
    movq   [%4+120], m7
%endif
    STORE_DIFFx2 m0, m1, m5, m6, m7, 6, %1, %3  ; rows 0-1
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m2, m3, m5, m6, m7, 6, %1, %3  ; rows 2-3
    mova         m0, [%2   ]                    ; reload spilled rows
    mova         m1, [%2+16]
    mova         m2, [%2+32]
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m4, m0, m5, m6, m7, 6, %1, %3  ; rows 4-5
    lea          %1, [%1+%3*2]
    STORE_DIFFx2 m1, m2, m5, m6, m7, 6, %1, %3  ; rows 6-7
%endmacro
INIT_MMX mmx
; void ff_h264_idct8_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 0
    movsxdifnidn r2, r2d
    ; 128-byte stack scratch for the transposed block, kept 8-aligned
    %assign pad 128+4-(stack_offset&7)
    SUB         rsp, pad

    add word  [r1], 32          ; fold the +32 rounding into coefficient 0
    IDCT8_ADD_MMX_START r1  , rsp
    IDCT8_ADD_MMX_START r1+8, rsp+64
    lea          r3, [r0+4]
    IDCT8_ADD_MMX_END   r0  , rsp,   r2, r1 ; left half; also clears block
    IDCT8_ADD_MMX_END   r3  , rsp+8, r2     ; right half

    ADD         rsp, pad
    RET
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride,
; %4=GPR scratch (receives stride*3)
; Full 8x8 transform-and-add in xmm registers; clears the block.
%macro IDCT8_ADD_SSE 4
    IDCT8_1D_FULL %2
%if ARCH_X86_64
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%else
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%2], [%2+16]
%endif
    paddw        m0, [pw_32]    ; rounding bias for the final >>6
%if ARCH_X86_64 == 0
    ; 32-bit: only xmm0-7 exist, spill rows 0/4 through block memory
    mova    [%2   ], m0
    mova    [%2+16], m4
    IDCT8_1D   [%2], [%2+ 16]
    mova    [%2   ], m6
    mova    [%2+16], m7
%else
    ; 64-bit: keep rows 0/4 in m8/m9 instead of memory
    SWAP          0, 8
    SWAP          4, 9
    IDCT8_1D     m8, m9
    SWAP          6, 8
    SWAP          7, 9
%endif
    pxor         m7, m7
    lea          %4, [%3*3]
    STORE_DIFF   m0, m6, m7, [%1     ]
    STORE_DIFF   m1, m6, m7, [%1+%3  ]
    STORE_DIFF   m2, m6, m7, [%1+%3*2]
    STORE_DIFF   m3, m6, m7, [%1+%4  ]
%if ARCH_X86_64 == 0
    mova         m0, [%2   ]    ; reload spilled rows 6/7
    mova         m1, [%2+16]
%else
    SWAP          0, 8
    SWAP          1, 9
%endif
    ; clear the coefficient block
    mova   [%2+  0], m7
    mova   [%2+ 16], m7
    mova   [%2+ 32], m7
    mova   [%2+ 48], m7
    mova   [%2+ 64], m7
    mova   [%2+ 80], m7
    mova   [%2+ 96], m7
    mova   [%2+112], m7
    lea          %1, [%1+%3*4]
    STORE_DIFF   m4, m6, m7, [%1     ]
    STORE_DIFF   m5, m6, m7, [%1+%3  ]
    STORE_DIFF   m0, m6, m7, [%1+%3*2]
    STORE_DIFF   m1, m6, m7, [%1+%4  ]
%endmacro
INIT_XMM sse2
; void ff_h264_idct8_add_8_sse2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 10
    movsxdifnidn r2, r2d
    IDCT8_ADD_SSE r0, r1, r2, r3
    RET
; %1=GPR holding the dc value (clobbered), %2=stride GPR
; Round the dc ((dc+32)>>6) and broadcast it: m0 = +dc bytes,
; m1 = -dc bytes, for saturated add/sub in DC_ADD_MMXEXT_OP.
; On exit %1 holds stride*3 for the 4-row addressing in the OP macro.
%macro DC_ADD_MMXEXT_INIT 2
    add          %1, 32
    sar          %1, 6          ; dc = (dc + 32) >> 6
    movd         m0, %1d
    lea          %1, [%2*3]
    pshufw       m0, m0, 0      ; broadcast dc to all words
    pxor         m1, m1
    psubw        m1, m0         ; m1 = -dc
    packuswb     m0, m0         ; clip +dc to u8 and replicate
    packuswb     m1, m1         ; clip -dc to u8 and replicate
%endmacro
; %1=load/store insn (movh/mova/movq), %2=dst ptr, %3=stride, %4=stride*3
; Add the broadcast dc prepared by DC_ADD_MMXEXT_INIT (m0=+dc, m1=-dc)
; to four rows of pixels with unsigned saturation.
%macro DC_ADD_MMXEXT_OP 4
    %1           m2, [%2     ]
    %1           m3, [%2+%3  ]
    %1           m4, [%2+%3*2]
    %1           m5, [%2+%4  ]
    paddusb      m2, m0         ; p = sat(p + dc)
    paddusb      m3, m0
    paddusb      m4, m0
    paddusb      m5, m0
    psubusb      m2, m1         ; then p = sat(p - (-dc)) for negative dc
    psubusb      m3, m1
    psubusb      m4, m1
    psubusb      m5, m1
    %1    [%2     ], m2
    %1    [%2+%3  ], m3
    %1    [%2+%3*2], m4
    %1    [%2+%4  ], m5
%endmacro
INIT_MMX mmxext
; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
%if ARCH_X86_64
cglobal h264_idct_dc_add_8, 3, 4, 0
    movsxd       r2, r2d
    movsx        r3, word [r1]      ; dc coefficient
    mov dword  [r1], 0              ; clear it for the caller
    DC_ADD_MMXEXT_INIT r3, r2
    DC_ADD_MMXEXT_OP movh, r0, r2, r3
    RET

; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 3, 4, 0
    movsxd       r2, r2d
    movsx        r3, word [r1]
    mov dword  [r1], 0
    DC_ADD_MMXEXT_INIT r3, r2
    DC_ADD_MMXEXT_OP mova, r0, r2, r3
    lea          r0, [r0+r2*4]      ; second group of four rows
    DC_ADD_MMXEXT_OP mova, r0, r2, r3
    RET
%else
; 32-bit variants: stride is reloaded from the stack into r1 once the
; block pointer is no longer needed.
; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_dc_add_8, 2, 3, 0
    movsx        r2, word [r1]
    mov dword  [r1], 0
    mov          r1, r2m            ; r1 = stride
    DC_ADD_MMXEXT_INIT r2, r1
    DC_ADD_MMXEXT_OP movh, r0, r1, r2
    RET

; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 2, 3, 0
    movsx        r2, word [r1]
    mov dword  [r1], 0
    mov          r1, r2m
    DC_ADD_MMXEXT_INIT r2, r1
    DC_ADD_MMXEXT_OP mova, r0, r1, r2
    lea          r0, [r0+r1*4]
    DC_ADD_MMXEXT_OP mova, r0, r1, r2
    RET
%endif
INIT_MMX mmx
; void ff_h264_idct_add16_8_mmx(uint8_t *dst, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    movsxdifnidn r3, r3d
    xor          r5, r5            ; r5 = block counter (0..15)
%ifdef PIC
    lea     picregq, [scan8_mem]
%endif
.nextblock:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]  ; nnzc entry for this block
    test         r6, r6
    jz .skipblock                  ; all-zero block: nothing to add
    mov         r6d, dword [r1+r5*4] ; block_offset[i]
    lea          r6, [r0+r6]
    IDCT4_ADD    r6, r2, r3
.skipblock:
    inc          r5
    add          r2, 32            ; next 4x4 coefficient block
    cmp          r5, 16
    jl .nextblock
    REP_RET
; void ff_h264_idct8_add4_8_mmx(uint8_t *dst, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6 * 8])
; Four 8x8 transforms, each handled in two 4-column halves via the
; MMX START/END macro pair with a 128-byte stack scratch buffer.
cglobal h264_idct8_add4_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    movsxdifnidn r3, r3d
    %assign pad 128+4-(stack_offset&7)
    SUB         rsp, pad
    xor          r5, r5
%ifdef PIC
    lea     picregq, [scan8_mem]
%endif
.nextblock:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    test         r6, r6
    jz .skipblock
    mov         r6d, dword [r1+r5*4]
    add          r6, r0
    add word   [r2], 32            ; fold rounding into coefficient 0
    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp,   r3, r2  ; left half; clears block
    mov         r6d, dword [r1+r5*4]
    lea          r6, [r0+r6+4]
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3      ; right half
.skipblock:
    add          r5, 4             ; 8x8 blocks advance by 4 scan entries
    add          r2, 128
    cmp          r5, 16
    jl .nextblock
    ADD         rsp, pad
    RET
INIT_MMX mmxext
; void ff_h264_idct_add16_8_mmxext(uint8_t *dst, const int *block_offset,
;                                  int16_t *block, int stride,
;                                  const uint8_t nnzc[6 * 8])
; Like the mmx version, plus a dc-only fast path when nnzc == 1 and the
; dc coefficient is the only nonzero value.
cglobal h264_idct_add16_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    movsxdifnidn r3, r3d
    xor          r5, r5
%ifdef PIC
    lea     picregq, [scan8_mem]
%endif
.nextblock:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    test         r6, r6
    jz .skipblock
    cmp          r6, 1             ; nnzc == 1: candidate for dc-only path
    jnz .no_dc
    movsx        r6, word [r2]
    test         r6, r6
    jz .no_dc
    ; dc-only fast path
    mov word   [r2], 0
    DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
    mov       dst2d, dword [r1+r5*4]
    lea       dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
    mov          r1, r1m           ; r1 was reused as dst2; restore it
%endif
    inc          r5
    add          r2, 32
    cmp          r5, 16
    jl .nextblock
    REP_RET
.no_dc:
    mov         r6d, dword [r1+r5*4]
    add          r6, r0
    IDCT4_ADD    r6, r2, r3
.skipblock:
    inc          r5
    add          r2, 32
    cmp          r5, 16
    jl .nextblock
    REP_RET
INIT_MMX mmx
; void ff_h264_idct_add16intra_8_mmx(uint8_t *dst, const int *block_offset,
;                                    int16_t *block, int stride,
;                                    const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    movsxdifnidn r3, r3d
    xor          r5, r5
%ifdef PIC
    lea     picregq, [scan8_mem]
%endif
.nextblock:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    or          r6w, word [r2]     ; intra: also process dc-only blocks
    test         r6, r6
    jz .skipblock
    mov         r6d, dword [r1+r5*4]
    add          r6, r0
    IDCT4_ADD    r6, r2, r3
.skipblock:
    inc          r5
    add          r2, 32
    cmp          r5, 16
    jl .nextblock
    REP_RET
INIT_MMX mmxext
; void ff_h264_idct_add16intra_8_mmxext(uint8_t *dst, const int *block_offset,
;                                       int16_t *block, int stride,
;                                       const uint8_t nnzc[6 * 8])
; nnzc != 0 -> full 4x4 transform; otherwise a dc-only add if the dc
; coefficient is set.
cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    movsxdifnidn r3, r3d
    xor          r5, r5
%ifdef PIC
    lea     picregq, [scan8_mem]
%endif
.nextblock:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    test         r6, r6
    jz .try_dc
    mov         r6d, dword [r1+r5*4]
    lea          r6, [r0+r6]
    IDCT4_ADD    r6, r2, r3
    inc          r5
    add          r2, 32
    cmp          r5, 16
    jl .nextblock
    REP_RET
.try_dc:
    movsx        r6, word [r2]
    test         r6, r6
    jz .skipblock
    mov word   [r2], 0
    DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
    mov       dst2d, dword [r1+r5*4]
    add       dst2q, r0
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
    mov          r1, r1m           ; restore block_offset after reusing r1
%endif
.skipblock:
    inc          r5
    add          r2, 32
    cmp          r5, 16
    jl .nextblock
    REP_RET
; void ff_h264_idct8_add4_8_mmxext(uint8_t *dst, const int *block_offset,
;                                  int16_t *block, int stride,
;                                  const uint8_t nnzc[6 * 8])
; Like the mmx version, plus an 8x8 dc-only fast path when nnzc == 1.
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    movsxdifnidn r3, r3d
    %assign pad 128+4-(stack_offset&7)
    SUB         rsp, pad
    xor          r5, r5
%ifdef PIC
    lea     picregq, [scan8_mem]
%endif
.nextblock:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    test         r6, r6
    jz .skipblock
    cmp          r6, 1
    jnz .no_dc
    movsx        r6, word [r2]
    test         r6, r6
    jz .no_dc
    ; dc-only fast path: add the dc to all 8x8 pixels (two 4-row passes)
    mov word   [r2], 0
    DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
    mov       dst2d, dword [r1+r5*4]
    lea       dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea       dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
    mov          r1, r1m           ; restore block_offset after reusing r1
%endif
    add          r5, 4
    add          r2, 128
    cmp          r5, 16
    jl .nextblock
    ADD         rsp, pad
    RET
.no_dc:
    mov         r6d, dword [r1+r5*4]
    add          r6, r0
    add word   [r2], 32            ; fold rounding into coefficient 0
    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp,   r3, r2
    mov         r6d, dword [r1+r5*4]
    lea          r6, [r0+r6+4]
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3
.skipblock:
    add          r5, 4
    add          r2, 128
    cmp          r5, 16
    jl .nextblock
    ADD         rsp, pad
    RET
INIT_XMM sse2
; void ff_h264_idct8_add4_8_sse2(uint8_t *dst, const int *block_offset,
;                                int16_t *block, int stride,
;                                const uint8_t nnzc[6 * 8])
; The dc-only fast path runs with MMX register naming (INIT_MMX below)
; since DC_ADD_MMXEXT_* are mmx-register macros; the full transform
; path switches back to xmm naming.
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    movsxdifnidn r3, r3d
    xor          r5, r5
%ifdef PIC
    lea     picregq, [scan8_mem]
%endif
.nextblock:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    test         r6, r6
    jz .skipblock
    cmp          r6, 1
    jnz .no_dc
    movsx        r6, word [r2]
    test         r6, r6
    jz .no_dc
INIT_MMX cpuname
    mov word   [r2], 0
    DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
    mov       dst2d, dword [r1+r5*4]
    add       dst2q, r0
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea       dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
    mov          r1, r1m           ; restore block_offset after reusing r1
%endif
    add          r5, 4
    add          r2, 128
    cmp          r5, 16
    jl .nextblock
    REP_RET
.no_dc:
INIT_XMM cpuname
    mov       dst2d, dword [r1+r5*4]
    add       dst2q, r0
    IDCT8_ADD_SSE dst2q, r2, r3, r6
%if ARCH_X86_64 == 0
    mov          r1, r1m
%endif
.skipblock:
    add          r5, 4
    add          r2, 128
    cmp          r5, 16
    jl .nextblock
    REP_RET
INIT_MMX mmx
; Helper for ff_h264_idct_add8_8_mmx: process the 4 blocks of one
; chroma plane. Entered with r5 = starting block index, r2 = coeffs,
; r4 = nnzc; the dest pointer is fetched per block (see XXX note).
h264_idct_add8_mmx_plane:
    movsxdifnidn r3, r3d
.nextblock:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    or          r6w, word [r2]     ; also run on dc-only blocks
    test         r6, r6
    jz .skipblock
%if ARCH_X86_64
    mov         r0d, dword [r1+r5*4]
    add          r0, [dst2q]       ; dst2q points into the dest[] array
%else
    mov          r0, r1m ; XXX r1m here is actually r0m of the calling func
    mov          r0, [r0]
    add          r0, dword [r1+r5*4]
%endif
    IDCT4_ADD    r0, r2, r3
.skipblock:
    inc          r5
    add          r2, 32
    test         r5, 3             ; stop after 4 blocks (index multiple of 4)
    jnz .nextblock
    rep ret
; void ff_h264_idct_add8_8_mmx(uint8_t **dest, const int *block_offset,
;                              int16_t *block, int stride,
;                              const uint8_t nnzc[6 * 8])
; Chroma add: runs the plane helper once per chroma plane.
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    movsxdifnidn r3, r3d
    mov          r5, 16            ; first chroma plane: blocks 16..19
    add          r2, 512           ; ... whose coeffs start at 16*32
%ifdef PIC
    lea     picregq, [scan8_mem]
%endif
%if ARCH_X86_64
    mov       dst2q, r0            ; keep &dest[0] for the helper
%endif
    call h264_idct_add8_mmx_plane
    mov          r5, 32            ; second plane: blocks 32..35
    add          r2, 384           ; helper already advanced r2 by 128
%if ARCH_X86_64
    add       dst2q, gprsize       ; &dest[1]
%else
    add        r0mp, gprsize
%endif
    call h264_idct_add8_mmx_plane
    RET
; Helper for ff_h264_idct_add8_8_mmxext: one chroma plane (4 blocks),
; with a dc-only fast path per block. Same entry contract as the mmx
; plane helper above.
h264_idct_add8_mmxext_plane:
    movsxdifnidn r3, r3d
.nextblock:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    test         r6, r6
    jz .try_dc
%if ARCH_X86_64
    mov         r0d, dword [r1+r5*4]
    add          r0, [dst2q]
%else
    mov          r0, r1m ; XXX r1m here is actually r0m of the calling func
    mov          r0, [r0]
    add          r0, dword [r1+r5*4]
%endif
    IDCT4_ADD    r0, r2, r3
    inc          r5
    add          r2, 32
    test         r5, 3
    jnz .nextblock
    rep ret
.try_dc:
    movsx        r6, word [r2]
    test         r6, r6
    jz .skipblock
    mov word   [r2], 0
    DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64
    mov         r0d, dword [r1+r5*4]
    add          r0, [dst2q]
%else
    mov          r0, r1m ; XXX r1m here is actually r0m of the calling func
    mov          r0, [r0]
    add          r0, dword [r1+r5*4]
%endif
    DC_ADD_MMXEXT_OP movh, r0, r3, r6
.skipblock:
    inc          r5
    add          r2, 32
    test         r5, 3
    jnz .nextblock
    rep ret
INIT_MMX mmxext
; void ff_h264_idct_add8_8_mmxext(uint8_t **dest, const int *block_offset,
;                                 int16_t *block, int stride,
;                                 const uint8_t nnzc[6 * 8])
; Chroma add with dc fast path: runs the mmxext plane helper per plane.
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    movsxdifnidn r3, r3d
    mov          r5, 16            ; first chroma plane: blocks 16..19
    add          r2, 512
%if ARCH_X86_64
    mov       dst2q, r0            ; keep &dest[0] for the helper
%endif
%ifdef PIC
    lea     picregq, [scan8_mem]
%endif
    call h264_idct_add8_mmxext_plane
    mov          r5, 32            ; second plane: blocks 32..35
    add          r2, 384           ; helper already advanced r2 by 128
%if ARCH_X86_64
    add       dst2q, gprsize       ; &dest[1]
%else
    add        r0mp, gprsize
%endif
    call h264_idct_add8_mmxext_plane
    RET
; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6 = clobbered
; Add the dc values of two horizontally adjacent 4x4 blocks
; ([r2] and [r2+32]) to an 8x4 pixel area; both dc words are cleared.
h264_idct_dc_add8_mmxext:
    movsxdifnidn r3, r3d
    movd         m0, [r2   ]        ;   0 0 X D
    mov word [r2+ 0], 0             ; clear first block's dc
    punpcklwd    m0, [r2+32]        ;   x X d D
    mov word [r2+32], 0             ; clear second block's dc
    paddsw       m0, [pw_32]        ; round
    psraw        m0, 6              ; (dc + 32) >> 6
    punpcklwd    m0, m0             ;   d d D D
    pxor         m1, m1             ;   0 0 0 0
    psubw        m1, m0             ;  -d-d-D-D
    packuswb     m0, m1             ;  -d-d-D-D d d D D
    pshufw       m1, m0, 0xFA       ;  -d-d-d-d-D-D-D-D
    punpcklwd    m0, m0             ;   d d d d D D D D
    lea          r6, [r3*3]
    DC_ADD_MMXEXT_OP movq, r0, r3, r6
    ret
ALIGN 16
INIT_XMM sse2
; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride
; Transform-and-add two horizontally adjacent 4x4 blocks at once:
; first block in the xmm low halves, second in the high halves.
; Both coefficient blocks are cleared.
h264_add8x4_idct_sse2:
    movsxdifnidn r3, r3d
    movq         m0, [r2+ 0]        ; block 0, rows 0..3 -> low halves
    movq         m1, [r2+ 8]
    movq         m2, [r2+16]
    movq         m3, [r2+24]
    movhps       m0, [r2+32]        ; block 1, rows 0..3 -> high halves
    movhps       m1, [r2+40]
    movhps       m2, [r2+48]
    movhps       m3, [r2+56]
    IDCT4_1D w,0,1,2,3,4,5
    TRANSPOSE2x4x4W 0,1,2,3,4       ; transpose each 4x4 independently
    paddw        m0, [pw_32]        ; rounding for the final >>6
    IDCT4_1D w,0,1,2,3,4,5
    pxor         m7, m7
    mova    [r2+ 0], m7             ; clear both blocks
    mova    [r2+16], m7
    mova    [r2+32], m7
    mova    [r2+48], m7
    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, r0, r3
    lea          r0, [r0+r3*2]
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, r0, r3
    ret
; %1=cycle index (0..7), %2=offset into nnzc of a pair of entries
; One unrolled step of ff_h264_idct_add16_8_sse2: handle an 8x4 pair
; of blocks, skipped entirely when both nnzc bytes are zero.
%macro add16_sse2_cycle 2
    movzx        r0, word [r4+%2]   ; two nnzc bytes checked at once
    test         r0, r0
    jz .cycle%1end
    mov         r0d, dword [r1+%1*8] ; block_offset for this pair
%if ARCH_X86_64
    add          r0, r5             ; r5 = saved dst (r0 is clobbered)
%else
    add          r0, r0m
%endif
    call h264_add8x4_idct_sse2
.cycle%1end:
%if %1 < 7
    add          r2, 64             ; advance past the two 4x4 blocks
%endif
%endmacro
; void ff_h264_idct_add16_8_sse2(uint8_t *dst, const int *block_offset,
;                                int16_t *block, int stride,
;                                const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8
    movsxdifnidn r3, r3d
%if ARCH_X86_64
    mov          r5, r0             ; save dst; r0 is clobbered each cycle
%endif
    ; unrolling of the loop leads to an average performance gain of
    ; 20-25%
    add16_sse2_cycle 0, 0xc
    add16_sse2_cycle 1, 0x14
    add16_sse2_cycle 2, 0xe
    add16_sse2_cycle 3, 0x16
    add16_sse2_cycle 4, 0x1c
    add16_sse2_cycle 5, 0x24
    add16_sse2_cycle 6, 0x1e
    add16_sse2_cycle 7, 0x26
    RET
; %1=cycle index (0..7), %2=offset into nnzc of a pair of entries
; Like add16_sse2_cycle, but when both nnzc bytes are zero it still
; applies a dc-only add if either block's dc coefficient is set.
%macro add16intra_sse2_cycle 2
    movzx        r0, word [r4+%2]
    test         r0, r0
    jz .try%1dc
    mov         r0d, dword [r1+%1*8]
%if ARCH_X86_64
    add          r0, r7             ; r7 = saved dst
%else
    add          r0, r0m
%endif
    call h264_add8x4_idct_sse2
    jmp .cycle%1end
.try%1dc:
    movsx        r0, word [r2   ]   ; dc of first block
    or          r0w, word [r2+32]   ; dc of second block
    jz .cycle%1end
    mov         r0d, dword [r1+%1*8]
%if ARCH_X86_64
    add          r0, r7
%else
    add          r0, r0m
%endif
    call h264_idct_dc_add8_mmxext
.cycle%1end:
%if %1 < 7
    add          r2, 64
%endif
%endmacro
; void ff_h264_idct_add16intra_8_sse2(uint8_t *dst, const int *block_offset,
;                                     int16_t *block, int stride,
;                                     const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8
    movsxdifnidn r3, r3d
%if ARCH_X86_64
    mov          r7, r0             ; save dst; r0 is clobbered each cycle
%endif
    add16intra_sse2_cycle 0, 0xc
    add16intra_sse2_cycle 1, 0x14
    add16intra_sse2_cycle 2, 0xe
    add16intra_sse2_cycle 3, 0x16
    add16intra_sse2_cycle 4, 0x1c
    add16intra_sse2_cycle 5, 0x24
    add16intra_sse2_cycle 6, 0x1e
    add16intra_sse2_cycle 7, 0x26
    RET
; %1=cycle index (0..3), %2=offset into nnzc of a pair of entries
; One step of ff_h264_idct_add8_8_sse2: an 8x4 pair of chroma blocks,
; with a dc-only fallback. dest is a uint8_t** (one pointer per plane),
; hence the extra dereference when computing r0.
%macro add8_sse2_cycle 2
    movzx        r0, word [r4+%2]
    test         r0, r0
    jz .try%1dc
%if ARCH_X86_64
    mov         r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add          r0, [r7]           ; r7 -> current dest[] entry
%else
    mov          r0, r0m
    mov          r0, [r0]
    add          r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
%endif
    call h264_add8x4_idct_sse2
    jmp .cycle%1end
.try%1dc:
    movsx        r0, word [r2   ]
    or          r0w, word [r2+32]
    jz .cycle%1end
%if ARCH_X86_64
    mov         r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add          r0, [r7]
%else
    mov          r0, r0m
    mov          r0, [r0]
    add          r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
%endif
    call h264_idct_dc_add8_mmxext
.cycle%1end:
%if %1 == 1
    add          r2, 384+64         ; jump to the second plane's coeffs
%elif %1 < 3
    add          r2, 64
%endif
%endmacro
; void ff_h264_idct_add8_8_sse2(uint8_t **dest, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 7 + ARCH_X86_64, 8
    movsxdifnidn r3, r3d
    add          r2, 512            ; chroma coeffs start at block 16
%if ARCH_X86_64
    mov          r7, r0             ; r7 = &dest[0]
%endif
    add8_sse2_cycle 0, 0x34
    add8_sse2_cycle 1, 0x3c
%if ARCH_X86_64
    add          r7, gprsize        ; advance to &dest[1]
%else
    add        r0mp, gprsize
%endif
    add8_sse2_cycle 2, 0x5c
    add8_sse2_cycle 3, 0x64
    RET
;void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul)
; WALSH4_1D: one 4-point Hadamard (Walsh) butterfly pass over
; registers %1..%4, using %5 as scratch.
%macro WALSH4_1D 5
    SUMSUB_BADC w, %4, %3, %2, %1, %5
    SUMSUB_BADC w, %4, %2, %3, %1, %5
    SWAP %1, %4, %3
%endmacro
; Dequantize transformed dc words by the qmul held in t3d.
; Interleaving each coefficient with the constant 1 lets pmaddwd
; compute coeff*qmul + hi16(t3d) in one op; the caller places a
; rounding bias (128 << 16) in the high half of t3d beforehand.
; sse2 form:  DEQUANT shift           (m0..m3 in, xmm0/xmm2 out)
; mmx form:   DEQUANT rega, regb, shift  (two mmx regs per call)
%macro DEQUANT 1-3
%if cpuflag(sse2)
    movd       xmm4, t3d            ; (128<<16) | qmul
    movq       xmm5, [pw_1]
    pshufd     xmm4, xmm4, 0        ; broadcast
    movq2dq    xmm0, m0
    movq2dq    xmm1, m1
    movq2dq    xmm2, m2
    movq2dq    xmm3, m3
    punpcklwd  xmm0, xmm5           ; pair each coeff with 1
    punpcklwd  xmm1, xmm5
    punpcklwd  xmm2, xmm5
    punpcklwd  xmm3, xmm5
    pmaddwd    xmm0, xmm4           ; coeff*qmul + 128
    pmaddwd    xmm1, xmm4
    pmaddwd    xmm2, xmm4
    pmaddwd    xmm3, xmm4
    psrad      xmm0, %1
    psrad      xmm1, %1
    psrad      xmm2, %1
    psrad      xmm3, %1
    packssdw   xmm0, xmm1
    packssdw   xmm2, xmm3
%else
    mova         m7, [pw_1]
    mova         m4, %1
    punpcklwd    %1, m7             ; pair each coeff with 1
    punpckhwd    m4, m7
    mova         m5, %2
    punpcklwd    %2, m7
    punpckhwd    m5, m7
    movd         m7, t3d            ; (128<<16) | qmul
    punpckldq    m7, m7             ; broadcast
    pmaddwd      %1, m7             ; coeff*qmul + 128
    pmaddwd      %2, m7
    pmaddwd      m4, m7
    pmaddwd      m5, m7
    psrad        %1, %3
    psrad        %2, %3
    psrad        m4, %3
    psrad        m5, %3
    packssdw     %1, m4
    packssdw     %2, m5
%endif
%endmacro
; %1=vector reg, %2..%5 (and %6..%9 in the 8-word sse form) = indices
; Scatter the packed words of %1 into the output at t2, one word
; every 32 bytes (i.e. one per 16-coefficient 4x4 block).
%macro STORE_WORDS 5-9
%if cpuflag(sse)
    movd        t0d, %1
    psrldq       %1, 4
    movd        t1d, %1
    psrldq       %1, 4
    mov  [t2+%2*32], t0w
    mov  [t2+%4*32], t1w
    shr         t0d, 16
    shr         t1d, 16
    mov  [t2+%3*32], t0w
    mov  [t2+%5*32], t1w
    movd        t0d, %1
    psrldq       %1, 4
    movd        t1d, %1
    mov  [t2+%6*32], t0w
    mov  [t2+%8*32], t1w
    shr         t0d, 16
    shr         t1d, 16
    mov  [t2+%7*32], t0w
    mov  [t2+%9*32], t1w
%else
    movd        t0d, %1
    psrlq        %1, 32
    movd        t1d, %1
    mov  [t2+%2*32], t0w
    mov  [t2+%4*32], t1w
    shr         t0d, 16
    shr         t1d, 16
    mov  [t2+%3*32], t0w
    mov  [t2+%5*32], t1w
%endif
%endmacro
; %1 = right-shift amount (immediate, or a vector reg for the
; big-qmul path). Dequantize the 16 transformed dc values in m0..m3
; and scatter them into the output via STORE_WORDS.
%macro DEQUANT_STORE 1
%if cpuflag(sse2)
    DEQUANT %1
    STORE_WORDS xmm0, 0, 1, 4, 5, 2, 3, 6, 7
    STORE_WORDS xmm2, 8, 9, 12, 13, 10, 11, 14, 15
%else
    DEQUANT m0, m1, %1
    STORE_WORDS m0, 0, 1, 4, 5
    STORE_WORDS m1, 2, 3, 6, 7
    DEQUANT m2, m3, %1
    STORE_WORDS m2, 8, 9, 12, 13
    STORE_WORDS m3, 10, 11, 14, 15
%endif
%endmacro
; %1 = number of xmm registers to declare/spill
; void ff_h264_luma_dc_dequant_idct_*(int16_t *output, int16_t *input,
;                                     int qmul)
; 4x4 Hadamard transform of the luma dc coefficients followed by
; dequantization; results are scattered into output, one word per
; 32 bytes (the dc slot of each 4x4 block).
%macro IDCT_DC_DEQUANT 1
cglobal h264_luma_dc_dequant_idct, 3, 4, %1
    ; manually spill XMM registers for Win64 because
    ; the code here is initialized with INIT_MMX
    WIN64_SPILL_XMM %1
    movq         m3, [r1+24]
    movq         m2, [r1+16]
    movq         m1, [r1+ 8]
    movq         m0, [r1+ 0]
    WALSH4_1D  0,1,2,3,4
    TRANSPOSE4x4W 0,1,2,3,4
    WALSH4_1D  0,1,2,3,4
; t0=shift, t1=tmp, t2=output, t3=qmul
%if WIN64
    DECLARE_REG_TMP 0,3,1,2
    ; we can't avoid this, because r0 is the shift register (ecx) on win64
    xchg         r0, t2
%elif ARCH_X86_64
    DECLARE_REG_TMP 3,1,0,2
%else
    DECLARE_REG_TMP 1,3,0,2
%endif
    cmp         t3d, 32767          ; does qmul fit a signed pmaddwd word?
    jg .big_qmul
    add         t3d, 128 << 16      ; rounding bias in t3d's high half
    DEQUANT_STORE 8                 ; (coeff*qmul + 128) >> 8
    RET
.big_qmul:
    ; qmul too large for a 16-bit multiplicand: pre-shift it right and
    ; reduce the final shift by the same amount to compensate
    bsr         t0d, t3d            ; position of qmul's top bit
    add         t3d, 128 << 16
    mov         t1d, 7
    cmp         t0d, t1d
    cmovg       t0d, t1d            ; clamp pre-shift to 7
    inc         t1d
    shr         t3d, t0b
    sub         t1d, t0d            ; final shift = 8 - pre-shift
%if cpuflag(sse2)
    movd       xmm6, t1d
    DEQUANT_STORE xmm6
%else
    movd         m6, t1d
    DEQUANT_STORE m6
%endif
    RET
%endmacro
INIT_MMX mmx
IDCT_DC_DEQUANT 0
; note: INIT_MMX (not INIT_XMM) with the sse2 cpuflag is deliberate —
; the macro loads into mmx regs and DEQUANT's sse2 branch names its
; xmm registers explicitly.
INIT_MMX sse2
IDCT_DC_DEQUANT 7