;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2016 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;* Anton Mitrofanov <BugMaster@narod.ru>
;* Fiona Glaser <fiona@x264.com>
;* Henrik Gramner <henrik@gramner.com>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************
; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.
; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .
%ifndef private_prefix
%define private_prefix x264
%endif
%ifndef public_prefix
%define public_prefix private_prefix
%endif
%if HAVE_ALIGNED_STACK
%define STACK_ALIGNMENT 16
%endif
%ifndef STACK_ALIGNMENT
%if ARCH_X86_64
%define STACK_ALIGNMENT 16
%else
%define STACK_ALIGNMENT 4
%endif
%endif
%define WIN64 0
%define UNIX64 0
%if ARCH_X86_64
%ifidn __OUTPUT_FORMAT__,win32
%define WIN64 1
%elifidn __OUTPUT_FORMAT__,win64
%define WIN64 1
%elifidn __OUTPUT_FORMAT__,x64
%define WIN64 1
%else
%define UNIX64 1
%endif
%endif
%define FORMAT_ELF 0
%ifidn __OUTPUT_FORMAT__,elf
%define FORMAT_ELF 1
%elifidn __OUTPUT_FORMAT__,elf32
%define FORMAT_ELF 1
%elifidn __OUTPUT_FORMAT__,elf64
%define FORMAT_ELF 1
%endif
%ifdef PREFIX
%define mangle(x) _ %+ x
%else
%define mangle(x) x
%endif
; aout does not support align=
; NOTE: This section is out of sync with x264, in order to
; keep supporting OS/2.
%macro SECTION_RODATA 0-1 16
%ifidn __OUTPUT_FORMAT__,aout
section .text
%else
SECTION .rodata align=%1
%endif
%endmacro
%if WIN64
%define PIC
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
%undef PIC
%endif
%ifdef PIC
default rel
%endif
%macro CPUNOP 1
%if HAVE_CPUNOP
CPU %1
%endif
%endmacro
; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.
; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. The stack will be aligned before
; allocating the specified stack size. If the required stack alignment is
; larger than the known stack alignment the stack will be manually aligned
; and an extra register will be allocated to hold the original stack
; pointer (to not invalidate r0m etc.). To prevent the use of an extra
; register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal
; e.g.
; cglobal foo, 2,3,7,0x40, dst, src, tmp
; declares a function (foo) that automatically loads two arguments (dst and
; src) into registers, uses one additional register (tmp) plus 7 vector
; registers (m0-m6) and allocates 0x40 bytes of stack space.
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.
; RET:
; Pops anything that was pushed by PROLOGUE, and returns.
; REP_RET:
; Use this instead of RET if it's a branch target.
; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
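;
; Illustrative sketch (hypothetical function, not part of the original file):
; a leaf function written against the interface above, shown commented out so
; this header still assembles unchanged. Assumes 16-byte-aligned pointers.
;     INIT_XMM sse2
;     cglobal sad16, 2,2,2, ref, cur     ; 2 args, 2 gprs, 2 xmm regs
;         mova    m0, [refq]             ; refq/curq name the argument regs
;         psadbw  m0, [curq]             ; partial sums in low/high qwords
;         movhlps m1, m0
;         paddw   m0, m1
;         movd    eax, m0                ; return value in eax
;         RET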
%macro DECLARE_REG 2-3
%define r%1q %2
%define r%1d %2d
%define r%1w %2w
%define r%1b %2b
%define r%1h %2h
%define %2q %2
%if %0 == 2
%define r%1m %2d
%define r%1mp %2
%elif ARCH_X86_64 ; memory
%define r%1m [rstk + stack_offset + %3]
%define r%1mp qword r %+ %1 %+ m
%else
%define r%1m [rstk + stack_offset + %3]
%define r%1mp dword r %+ %1 %+ m
%endif
%define r%1 %2
%endmacro
%macro DECLARE_REG_SIZE 3
%define r%1q r%1
%define e%1q r%1
%define r%1d e%1
%define e%1d e%1
%define r%1w %1
%define e%1w %1
%define r%1h %3
%define e%1h %3
%define r%1b %2
%define e%1b %2
%if ARCH_X86_64 == 0
%define r%1 e%1
%endif
%endmacro
DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null
; t# defines for when per-arch register allocation is more complex than just function arguments
%macro DECLARE_REG_TMP 1-*
%assign %%i 0
%rep %0
CAT_XDEFINE t, %%i, r%1
%assign %%i %%i+1
%rotate 1
%endrep
%endmacro
%macro DECLARE_REG_TMP_SIZE 0-*
%rep %0
%define t%1q t%1 %+ q
%define t%1d t%1 %+ d
%define t%1w t%1 %+ w
%define t%1h t%1 %+ h
%define t%1b t%1 %+ b
%rotate 1
%endrep
%endmacro
DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
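; For example (illustrative, not in the original): a function whose scratch
; registers differ per arch could declare
;     DECLARE_REG_TMP 2, 0, 1
; which makes t0 an alias for r2, t1 for r0 and t2 for r1, with t0q/t0d/t0w/
; t0h/t0b following the same size suffixes as the r# registers.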
%if ARCH_X86_64
%define gprsize 8
%else
%define gprsize 4
%endif
%macro PUSH 1
push %1
%ifidn rstk, rsp
%assign stack_offset stack_offset+gprsize
%endif
%endmacro
%macro POP 1
pop %1
%ifidn rstk, rsp
%assign stack_offset stack_offset-gprsize
%endif
%endmacro
%macro PUSH_IF_USED 1-*
%rep %0
%if %1 < regs_used
PUSH r%1
%endif
%rotate 1
%endrep
%endmacro
%macro POP_IF_USED 1-*
%rep %0
%if %1 < regs_used
pop r%1
%endif
%rotate 1
%endrep
%endmacro
%macro LOAD_IF_USED 1-*
%rep %0
%if %1 < num_args
mov r%1, r %+ %1 %+ mp
%endif
%rotate 1
%endrep
%endmacro
%macro SUB 2
sub %1, %2
%ifidn %1, rstk
%assign stack_offset stack_offset+(%2)
%endif
%endmacro
%macro ADD 2
add %1, %2
%ifidn %1, rstk
%assign stack_offset stack_offset-(%2)
%endif
%endmacro
%macro movifnidn 2
%ifnidn %1, %2
mov %1, %2
%endif
%endmacro
%macro movsxdifnidn 2
%ifnidn %1, %2
movsxd %1, %2
%endif
%endmacro
%macro ASSERT 1
%if (%1) == 0
%error assertion ``%1'' failed
%endif
%endmacro
%macro DEFINE_ARGS 0-*
%ifdef n_arg_names
%assign %%i 0
%rep n_arg_names
CAT_UNDEF arg_name %+ %%i, q
CAT_UNDEF arg_name %+ %%i, d
CAT_UNDEF arg_name %+ %%i, w
CAT_UNDEF arg_name %+ %%i, h
CAT_UNDEF arg_name %+ %%i, b
CAT_UNDEF arg_name %+ %%i, m
CAT_UNDEF arg_name %+ %%i, mp
CAT_UNDEF arg_name, %%i
%assign %%i %%i+1
%endrep
%endif
%xdefine %%stack_offset stack_offset
%undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
%assign %%i 0
%rep %0
%xdefine %1q r %+ %%i %+ q
%xdefine %1d r %+ %%i %+ d
%xdefine %1w r %+ %%i %+ w
%xdefine %1h r %+ %%i %+ h
%xdefine %1b r %+ %%i %+ b
%xdefine %1m r %+ %%i %+ m
%xdefine %1mp r %+ %%i %+ mp
CAT_XDEFINE arg_name, %%i, %1
%assign %%i %%i+1
%rotate 1
%endrep
%xdefine stack_offset %%stack_offset
%assign n_arg_names %0
%endmacro
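; For example (illustrative): inside a function declared with 3 arguments,
;     DEFINE_ARGS dst, src, len
; makes dstq/srcq/lenq aliases for r0/r1/r2 (plus dstd, dstm, dstmp and so on
; for the other sizes/locations), so later code reads by name rather than by
; register number.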
%define required_stack_alignment ((mmsize + 15) & ~15)
%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
%ifnum %1
%if %1 != 0
%assign %%pad 0
%assign stack_size %1
%if stack_size < 0
%assign stack_size -stack_size
%endif
%if WIN64
%assign %%pad %%pad + 32 ; shadow space
%if mmsize != 8
%assign xmm_regs_used %2
%if xmm_regs_used > 8
%assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
%endif
%endif
%endif
%if required_stack_alignment <= STACK_ALIGNMENT
; maintain the current stack alignment
%assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
SUB rsp, stack_size_padded
%else
%assign %%reg_num (regs_used - 1)
%xdefine rstk r %+ %%reg_num
; align stack, and save original stack location directly above
; it, i.e. in [rsp+stack_size_padded], so we can restore the
; stack in a single instruction (i.e. mov rsp, rstk or mov
; rsp, [rsp+stack_size_padded])
%if %1 < 0 ; need to store rsp on stack
%xdefine rstkm [rsp + stack_size + %%pad]
%assign %%pad %%pad + gprsize
%else ; can keep rsp in rstk during whole function
%xdefine rstkm rstk
%endif
%assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
mov rstk, rsp
and rsp, ~(required_stack_alignment-1)
sub rsp, stack_size_padded
movifnidn rstkm, rstk
%endif
WIN64_PUSH_XMM
%endif
%endif
%endmacro
%macro SETUP_STACK_POINTER 1
%ifnum %1
%if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
%if %1 > 0
%assign regs_used (regs_used + 1)
%endif
%if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
; Ensure that we don't clobber any registers containing arguments. For UNIX64 we also preserve r6 (rax)
; since it's used as a hidden argument in vararg functions to specify the number of vector registers used.
%assign regs_used 5 + UNIX64 * 3
%endif
%endif
%endif
%endmacro
%macro DEFINE_ARGS_INTERNAL 3+
%ifnum %2
DEFINE_ARGS %3
%elif %1 == 4
DEFINE_ARGS %2
%elif %1 > 4
DEFINE_ARGS %2, %3
%endif
%endmacro
%if WIN64 ; Windows x64 ;=================================================
DECLARE_REG 0, rcx
DECLARE_REG 1, rdx
DECLARE_REG 2, R8
DECLARE_REG 3, R9
DECLARE_REG 4, R10, 40
DECLARE_REG 5, R11, 48
DECLARE_REG 6, rax, 56
DECLARE_REG 7, rdi, 64
DECLARE_REG 8, rsi, 72
DECLARE_REG 9, rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R12, 96
DECLARE_REG 12, R13, 104
DECLARE_REG 13, R14, 112
DECLARE_REG 14, R15, 120
%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
%assign num_args %1
%assign regs_used %2
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
ALLOC_STACK %4, %3
%if mmsize != 8 && stack_size == 0
WIN64_SPILL_XMM %3
%endif
LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro
%macro WIN64_PUSH_XMM 0
; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
%if xmm_regs_used > 6
movaps [rstk + stack_offset + 8], xmm6
%endif
%if xmm_regs_used > 7
movaps [rstk + stack_offset + 24], xmm7
%endif
%if xmm_regs_used > 8
%assign %%i 8
%rep xmm_regs_used-8
movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
%assign %%i %%i+1
%endrep
%endif
%endmacro
%macro WIN64_SPILL_XMM 1
%assign xmm_regs_used %1
ASSERT xmm_regs_used <= 16
%if xmm_regs_used > 8
; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
%assign %%pad (xmm_regs_used-8)*16 + 32
%assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
SUB rsp, stack_size_padded
%endif
WIN64_PUSH_XMM
%endmacro
%macro WIN64_RESTORE_XMM_INTERNAL 1
%assign %%pad_size 0
%if xmm_regs_used > 8
%assign %%i xmm_regs_used
%rep xmm_regs_used-8
%assign %%i %%i-1
movaps xmm %+ %%i, [%1 + (%%i-8)*16 + stack_size + 32]
%endrep
%endif
%if stack_size_padded > 0
%if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
mov rsp, rstkm
%else
add %1, stack_size_padded
%assign %%pad_size stack_size_padded
%endif
%endif
%if xmm_regs_used > 7
movaps xmm7, [%1 + stack_offset - %%pad_size + 24]
%endif
%if xmm_regs_used > 6
movaps xmm6, [%1 + stack_offset - %%pad_size + 8]
%endif
%endmacro
%macro WIN64_RESTORE_XMM 1
WIN64_RESTORE_XMM_INTERNAL %1
%assign stack_offset (stack_offset-stack_size_padded)
%assign xmm_regs_used 0
%endmacro
%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0
%macro RET 0
WIN64_RESTORE_XMM_INTERNAL rsp
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
%if mmsize == 32
vzeroupper
%endif
AUTO_REP_RET
%endmacro
%elif ARCH_X86_64 ; *nix x64 ;=============================================
DECLARE_REG 0, rdi
DECLARE_REG 1, rsi
DECLARE_REG 2, rdx
DECLARE_REG 3, rcx
DECLARE_REG 4, R8
DECLARE_REG 5, R9
DECLARE_REG 6, rax, 8
DECLARE_REG 7, R10, 16
DECLARE_REG 8, R11, 24
DECLARE_REG 9, rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R12, 48
DECLARE_REG 12, R13, 56
DECLARE_REG 13, R14, 64
DECLARE_REG 14, R15, 72
%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
%assign num_args %1
%assign regs_used %2
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 9, 10, 11, 12, 13, 14
ALLOC_STACK %4
LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro
%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
%macro RET 0
%if stack_size_padded > 0
%if required_stack_alignment > STACK_ALIGNMENT
mov rsp, rstkm
%else
add rsp, stack_size_padded
%endif
%endif
POP_IF_USED 14, 13, 12, 11, 10, 9
%if mmsize == 32
vzeroupper
%endif
AUTO_REP_RET
%endmacro
%else ; X86_32 ;==============================================================
DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28
%define rsp esp
%macro DECLARE_ARG 1-*
%rep %0
%define r%1m [rstk + stack_offset + 4*%1 + 4]
%define r%1mp dword r%1m
%rotate 1
%endrep
%endmacro
DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
%assign num_args %1
%assign regs_used %2
ASSERT regs_used >= num_args
%if num_args > 7
%assign num_args 7
%endif
%if regs_used > 7
%assign regs_used 7
%endif
SETUP_STACK_POINTER %4
ASSERT regs_used <= 7
PUSH_IF_USED 3, 4, 5, 6
ALLOC_STACK %4
LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro
%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
%macro RET 0
%if stack_size_padded > 0
%if required_stack_alignment > STACK_ALIGNMENT
mov rsp, rstkm
%else
add rsp, stack_size_padded
%endif
%endif
POP_IF_USED 6, 5, 4, 3
%if mmsize == 32
vzeroupper
%endif
AUTO_REP_RET
%endmacro
%endif ;======================================================================
%if WIN64 == 0
%macro WIN64_SPILL_XMM 1
%endmacro
%macro WIN64_RESTORE_XMM 1
%endmacro
%macro WIN64_PUSH_XMM 0
%endmacro
%endif
; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
; a branch or a branch target. So switch to a 2-byte form of ret in that case.
; We can automatically detect "follows a branch", but not a branch target.
; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
%macro REP_RET 0
%if has_epilogue
RET
%else
rep ret
%endif
annotate_function_size
%endmacro
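; For example (illustrative): when an epilogue is itself a jump target, use
; REP_RET explicitly so pre-SSSE3 AMD cpus don't stall:
;     .skip:
;         REP_RET
; The "immediately follows a branch" case is handled automatically by
; AUTO_REP_RET below, which tracks last_branch_adr.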
%define last_branch_adr $$
%macro AUTO_REP_RET 0
%if notcpuflag(ssse3)
times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
%endif
ret
annotate_function_size
%endmacro
%macro BRANCH_INSTR 0-*
%rep %0
%macro %1 1-2 %1
%2 %1
%if notcpuflag(ssse3)
%%branch_instr equ $
%xdefine last_branch_adr %%branch_instr
%endif
%endmacro
%rotate 1
%endrep
%endmacro
BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp
%macro TAIL_CALL 2 ; callee, is_nonadjacent
%if has_epilogue
call %1
RET
%elif %2
jmp %1
%endif
annotate_function_size
%endmacro
;=============================================================================
; arch-independent part
;=============================================================================
%assign function_align 16
; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 1, %1 %+ SUFFIX, %2
%endmacro
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 0, %1 %+ SUFFIX, %2
%endmacro
%macro cglobal_internal 2-3+
annotate_function_size
%if %1
%xdefine %%FUNCTION_PREFIX private_prefix
%xdefine %%VISIBILITY hidden
%else
%xdefine %%FUNCTION_PREFIX public_prefix
%xdefine %%VISIBILITY
%endif
%ifndef cglobaled_%2
%xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
%xdefine %2.skip_prologue %2 %+ .skip_prologue
CAT_XDEFINE cglobaled_, %2, 1
%endif
%xdefine current_function %2
%xdefine current_function_section __SECT__
%if FORMAT_ELF
global %2:function %%VISIBILITY
%else
global %2
%endif
align function_align
%2:
RESET_MM_PERMUTATION ; needed for x86-64, also makes disassembly somewhat nicer
%xdefine rstk rsp ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
%assign stack_offset 0 ; stack pointer offset relative to the return address
%assign stack_size 0 ; amount of stack space that can be freely used inside a function
%assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
%assign xmm_regs_used 0 ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64
%ifnidn %3, ""
PROLOGUE %3
%endif
%endmacro
%macro cextern 1
%xdefine %1 mangle(private_prefix %+ _ %+ %1)
CAT_XDEFINE cglobaled_, %1, 1
extern %1
%endmacro
; like cextern, but without the prefix
%macro cextern_naked 1
%ifdef PREFIX
%xdefine %1 mangle(%1)
%endif
CAT_XDEFINE cglobaled_, %1, 1
extern %1
%endmacro
%macro const 1-2+
%xdefine %1 mangle(private_prefix %+ _ %+ %1)
%if FORMAT_ELF
global %1:data hidden
%else
global %1
%endif
%1: %2
%endmacro
; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
%if FORMAT_ELF
[SECTION .note.GNU-stack noalloc noexec nowrite progbits]
%endif
; Tell debuggers how large the function was.
; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
; This is invoked by RET and similar macros, and also cglobal does it for the previous function,
; but if the last function in a source file doesn't use any of the standard macros for its epilogue,
; then its size might be unspecified.
%macro annotate_function_size 0
%ifdef __YASM_VER__
%ifdef current_function
%if FORMAT_ELF
current_function_section
%%ecf equ $
size current_function %%ecf - current_function
__SECT__
%endif
%endif
%endif
%endmacro
; cpuflags
%assign cpuflags_mmx (1<<0)
%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2 (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3 (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3 (1<<8) | cpuflags_sse3
%assign cpuflags_sse4 (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42 (1<<10)| cpuflags_sse4
%assign cpuflags_avx (1<<11)| cpuflags_sse42
%assign cpuflags_xop (1<<12)| cpuflags_avx
%assign cpuflags_fma4 (1<<13)| cpuflags_avx
%assign cpuflags_fma3 (1<<14)| cpuflags_avx
%assign cpuflags_avx2 (1<<15)| cpuflags_fma3
%assign cpuflags_cache32 (1<<16)
%assign cpuflags_cache64 (1<<17)
%assign cpuflags_slowctz (1<<18)
%assign cpuflags_lzcnt (1<<19)
%assign cpuflags_aligned (1<<20) ; not a cpu feature, but a function variant
%assign cpuflags_atom (1<<21)
%assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt
%assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1
; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
%define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
%define notcpuflag(x) (cpuflag(x) ^ 1)
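; For example (illustrative): in a function declared with INIT_XMM ssse3,
; cpuflag(sse2) evaluates to 1 (the cascading definitions above make ssse3
; imply sse3, sse2, etc.) while notcpuflag(sse4) also evaluates to 1.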
; Takes an arbitrary number of cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-*
%xdefine SUFFIX
%undef cpuname
%assign cpuflags 0
%if %0 >= 1
%rep %0
%ifdef cpuname
%xdefine cpuname cpuname %+ _%1
%else
%xdefine cpuname %1
%endif
%assign cpuflags cpuflags | cpuflags_%1
%rotate 1
%endrep
%xdefine SUFFIX _ %+ cpuname
%if cpuflag(avx)
%assign avx_enabled 1
%endif
%if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
%define mova movaps
%define movu movups
%define movnta movntps
%endif
%if cpuflag(aligned)
%define movu mova
%elif cpuflag(sse3) && notcpuflag(ssse3)
%define movu lddqu
%endif
%endif
%if ARCH_X86_64 || cpuflag(sse2)
CPUNOP amdnop
%else
CPUNOP basicnop
%endif
%endmacro
; Merge mmx and sse*
; m# is a simd register of the currently selected size
; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
; (All 3 remain in sync through SWAP.)
%macro CAT_XDEFINE 3
%xdefine %1%2 %3
%endmacro
%macro CAT_UNDEF 2
%undef %1%2
%endmacro
%macro INIT_MMX 0-1+
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_MMX %1
%define mmsize 8
%define num_mmregs 8
%define mova movq
%define movu movq
%define movh movd
%define movnta movntq
%assign %%i 0
%rep 8
CAT_XDEFINE m, %%i, mm %+ %%i
CAT_XDEFINE nnmm, %%i, %%i
%assign %%i %%i+1
%endrep
%rep 8
CAT_UNDEF m, %%i
CAT_UNDEF nnmm, %%i
%assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
%macro INIT_XMM 0-1+
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_XMM %1
%define mmsize 16
%define num_mmregs 8
%if ARCH_X86_64
%define num_mmregs 16
%endif
%define mova movdqa
%define movu movdqu
%define movh movq
%define movnta movntdq
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, xmm %+ %%i
CAT_XDEFINE nnxmm, %%i, %%i
%assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
%macro INIT_YMM 0-1+
%assign avx_enabled 1
%define RESET_MM_PERMUTATION INIT_YMM %1
%define mmsize 32
%define num_mmregs 8
%if ARCH_X86_64
%define num_mmregs 16
%endif
%define mova movdqa
%define movu movdqu
%undef movh
%define movnta movntdq
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, ymm %+ %%i
CAT_XDEFINE nnymm, %%i, %%i
%assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
INIT_XMM
%macro DECLARE_MMCAST 1
%define mmmm%1 mm%1
%define mmxmm%1 mm%1
%define mmymm%1 mm%1
%define xmmmm%1 mm%1
%define xmmxmm%1 xmm%1
%define xmmymm%1 xmm%1
%define ymmmm%1 mm%1
%define ymmxmm%1 xmm%1
%define ymmymm%1 ymm%1
%define xm%1 xmm %+ m%1
%define ym%1 ymm %+ m%1
%endmacro
%assign i 0
%rep 16
DECLARE_MMCAST i
%assign i i+1
%endrep
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
%macro PERMUTE 2-* ; takes a list of pairs to swap
%rep %0/2
%xdefine %%tmp%2 m%2
%rotate 2
%endrep
%rep %0/2
%xdefine m%1 %%tmp%2
CAT_XDEFINE nn, m%1, %1
%rotate 2
%endrep
%endmacro
%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
%ifnum %1 ; SWAP 0, 1, ...
SWAP_INTERNAL_NUM %1, %2
%else ; SWAP m0, m1, ...
SWAP_INTERNAL_NAME %1, %2
%endif
%endmacro
%macro SWAP_INTERNAL_NUM 2-*
%rep %0-1
%xdefine %%tmp m%1
%xdefine m%1 m%2
%xdefine m%2 %%tmp
CAT_XDEFINE nn, m%1, %1
CAT_XDEFINE nn, m%2, %2
%rotate 1
%endrep
%endmacro
%macro SWAP_INTERNAL_NAME 2-*
%xdefine %%args nn %+ %1
%rep %0-1
%xdefine %%args %%args, nn %+ %2
%rotate 1
%endrep
SWAP_INTERNAL_NUM %%args
%endmacro
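; For example (illustrative): after
;     SWAP 0, 1
; the names m0 and m1 refer to each other's previous registers, so a macro
; that computed its result in m1 can "return" it in m0 without emitting a
; move. SWAP m0, m1 does the same via the register names instead of numbers.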
; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
%macro SAVE_MM_PERMUTATION 0-1
%if %0
%xdefine %%f %1_m
%else
%xdefine %%f current_function %+ _m
%endif
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE %%f, %%i, m %+ %%i
%assign %%i %%i+1
%endrep
%endmacro
%macro LOAD_MM_PERMUTATION 1 ; name to load from
%ifdef %1_m0
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, %1_m %+ %%i
CAT_XDEFINE nn, m %+ %%i, %%i
%assign %%i %%i+1
%endrep
%endif
%endmacro
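; For example (illustrative): a helper whose body ends with
;     SWAP 0, 2
;     SAVE_MM_PERMUTATION
; records its final register layout, and the `call` wrapper below restores
; that layout in the caller, so the helper's outputs stay addressable as m0.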
; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
call_internal %1 %+ SUFFIX, %1
%endmacro
%macro call_internal 2
%xdefine %%i %2
%ifndef cglobaled_%2
%ifdef cglobaled_%1
%xdefine %%i %1
%endif
%endif
call %%i
LOAD_MM_PERMUTATION %%i
%endmacro
; Substitutions that reduce instruction size but are functionally equivalent
%macro add 2
%ifnum %2
%if %2==128
sub %1, -128
%else
add %1, %2
%endif
%else
add %1, %2
%endif
%endmacro
%macro sub 2
%ifnum %2
%if %2==128
add %1, -128
%else
sub %1, %2
%endif
%else
sub %1, %2
%endif
%endmacro
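; For example: "add eax, 128" needs a 4-byte immediate (+128 doesn't fit in a
; signed byte), while the equivalent "sub eax, -128" encodes -128 as a 1-byte
; sign-extended immediate, saving 3 bytes.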
;=============================================================================
; AVX abstraction layer
;=============================================================================
%assign i 0
%rep 16
%if i < 8
CAT_XDEFINE sizeofmm, i, 8
%endif
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32
%assign i i+1
%endrep
%undef i
%macro CHECK_AVX_INSTR_EMU 3-*
%xdefine %%opcode %1
%xdefine %%dst %2
%rep %0-2
%ifidn %%dst, %3
%error non-avx emulation of ``%%opcode'' is not supported
%endif
%rotate 1
%endrep
%endmacro
;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
;%6+: operands
%macro RUN_AVX_INSTR 6-9+
%ifnum sizeof%7
%assign __sizeofreg sizeof%7
%elifnum sizeof%6
%assign __sizeofreg sizeof%6
%else
%assign __sizeofreg mmsize
%endif
%assign __emulate_avx 0
%if avx_enabled && __sizeofreg >= 16
%xdefine __instr v%1
%else
%xdefine __instr %1
%if %0 >= 8+%4
%assign __emulate_avx 1
%endif
%endif
%ifnidn %2, fnord
%ifdef cpuname
%if notcpuflag(%2)
%error use of ``%1'' %2 instruction in cpuname function: current_function
%elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
%error use of ``%1'' sse2 instruction in cpuname function: current_function
%endif
%endif
%endif
%if __emulate_avx
%xdefine __src1 %7
%xdefine __src2 %8
%ifnidn %6, %7
%if %0 >= 9
CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, %8, %9
%else
CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, %8
%endif
%if %5 && %4 == 0
%ifnid %8
; 3-operand AVX instructions with a memory arg can only have it in src2,
; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
; So, if the instruction is commutative with a memory arg, swap them.
%xdefine __src1 %8
%xdefine __src2 %7
%endif
%endif
%if __sizeofreg == 8
MOVQ %6, __src1
%elif %3
MOVAPS %6, __src1
%else
MOVDQA %6, __src1
%endif
%endif
%if %0 >= 9
%1 %6, __src2, %9
%else
%1 %6, __src2
%endif
%elif %0 >= 9
__instr %6, %7, %8, %9
%elif %0 == 8
__instr %6, %7, %8
%elif %0 == 7
__instr %6, %7
%else
__instr %6
%endif
%endmacro
;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 1-5 fnord, 0, 1, 0
%macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
%ifidn %2, fnord
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
%elifidn %3, fnord
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
%elifidn %4, fnord
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
%elifidn %5, fnord
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
%else
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
%endif
%endmacro
%endmacro
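; For example (illustrative): with the definitions above,
;     paddw m0, m1, m2
; assembles as "vpaddw xmm0, xmm1, xmm2" in AVX functions, and in pre-AVX
; functions is emulated as "movdqa xmm0, xmm1" followed by "paddw xmm0, xmm2"
; (the mov is skipped when dst and src1 are already the same register).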
; Instructions with both VEX and non-VEX encodings
; Non-destructive instructions are written without parameters
AVX_INSTR addpd, sse2, 1, 0, 1
AVX_INSTR addps, sse, 1, 0, 1
AVX_INSTR addsd, sse2, 1, 0, 1
AVX_INSTR addss, sse, 1, 0, 1
AVX_INSTR addsubpd, sse3, 1, 0, 0
AVX_INSTR addsubps, sse3, 1, 0, 0
AVX_INSTR aesdec, fnord, 0, 0, 0
AVX_INSTR aesdeclast, fnord, 0, 0, 0
AVX_INSTR aesenc, fnord, 0, 0, 0
AVX_INSTR aesenclast, fnord, 0, 0, 0
AVX_INSTR aesimc
AVX_INSTR aeskeygenassist
AVX_INSTR andnpd, sse2, 1, 0, 0
AVX_INSTR andnps, sse, 1, 0, 0
AVX_INSTR andpd, sse2, 1, 0, 1
AVX_INSTR andps, sse, 1, 0, 1
AVX_INSTR blendpd, sse4, 1, 0, 0
AVX_INSTR blendps, sse4, 1, 0, 0
AVX_INSTR blendvpd, sse4, 1, 0, 0
AVX_INSTR blendvps, sse4, 1, 0, 0
AVX_INSTR cmppd, sse2, 1, 1, 0
AVX_INSTR cmpps, sse, 1, 1, 0
AVX_INSTR cmpsd, sse2, 1, 1, 0
AVX_INSTR cmpss, sse, 1, 1, 0
AVX_INSTR comisd, sse2
AVX_INSTR comiss, sse
AVX_INSTR cvtdq2pd, sse2
AVX_INSTR cvtdq2ps, sse2
AVX_INSTR cvtpd2dq, sse2
AVX_INSTR cvtpd2ps, sse2
AVX_INSTR cvtps2dq, sse2
AVX_INSTR cvtps2pd, sse2
AVX_INSTR cvtsd2si, sse2
AVX_INSTR cvtsd2ss, sse2
AVX_INSTR cvtsi2sd, sse2
AVX_INSTR cvtsi2ss, sse
AVX_INSTR cvtss2sd, sse2
AVX_INSTR cvtss2si, sse
AVX_INSTR cvttpd2dq, sse2
AVX_INSTR cvttps2dq, sse2
AVX_INSTR cvttsd2si, sse2
AVX_INSTR cvttss2si, sse
AVX_INSTR divpd, sse2, 1, 0, 0
AVX_INSTR divps, sse, 1, 0, 0
AVX_INSTR divsd, sse2, 1, 0, 0
AVX_INSTR divss, sse, 1, 0, 0
AVX_INSTR dppd, sse4, 1, 1, 0
AVX_INSTR dpps, sse4, 1, 1, 0
AVX_INSTR extractps, sse4
AVX_INSTR haddpd, sse3, 1, 0, 0
AVX_INSTR haddps, sse3, 1, 0, 0
AVX_INSTR hsubpd, sse3, 1, 0, 0
AVX_INSTR hsubps, sse3, 1, 0, 0
AVX_INSTR insertps, sse4, 1, 1, 0
AVX_INSTR lddqu, sse3
AVX_INSTR ldmxcsr, sse
AVX_INSTR maskmovdqu, sse2
AVX_INSTR maxpd, sse2, 1, 0, 1
AVX_INSTR maxps, sse, 1, 0, 1
AVX_INSTR maxsd, sse2, 1, 0, 1
AVX_INSTR maxss, sse, 1, 0, 1
AVX_INSTR minpd, sse2, 1, 0, 1
AVX_INSTR minps, sse, 1, 0, 1
AVX_INSTR minsd, sse2, 1, 0, 1
AVX_INSTR minss, sse, 1, 0, 1
AVX_INSTR movapd, sse2
AVX_INSTR movaps, sse
AVX_INSTR movd, mmx
AVX_INSTR movddup, sse3
AVX_INSTR movdqa, sse2
AVX_INSTR movdqu, sse2
AVX_INSTR movhlps, sse, 1, 0, 0
AVX_INSTR movhpd, sse2, 1, 0, 0
AVX_INSTR movhps, sse, 1, 0, 0
AVX_INSTR movlhps, sse, 1, 0, 0
AVX_INSTR movlpd, sse2, 1, 0, 0
AVX_INSTR movlps, sse, 1, 0, 0
AVX_INSTR movmskpd, sse2
AVX_INSTR movmskps, sse
AVX_INSTR movntdq, sse2
AVX_INSTR movntdqa, sse4
AVX_INSTR movntpd, sse2
AVX_INSTR movntps, sse
AVX_INSTR movq, mmx
AVX_INSTR movsd, sse2, 1, 0, 0
AVX_INSTR movshdup, sse3
AVX_INSTR movsldup, sse3
AVX_INSTR movss, sse, 1, 0, 0
AVX_INSTR movupd, sse2
AVX_INSTR movups, sse
AVX_INSTR mpsadbw, sse4
AVX_INSTR mulpd, sse2, 1, 0, 1
AVX_INSTR mulps, sse, 1, 0, 1
AVX_INSTR mulsd, sse2, 1, 0, 1
AVX_INSTR mulss, sse, 1, 0, 1
AVX_INSTR orpd, sse2, 1, 0, 1
AVX_INSTR orps, sse, 1, 0, 1
AVX_INSTR pabsb, ssse3
AVX_INSTR pabsd, ssse3
AVX_INSTR pabsw, ssse3
AVX_INSTR packsswb, mmx, 0, 0, 0
AVX_INSTR packssdw, mmx, 0, 0, 0
AVX_INSTR packuswb, mmx, 0, 0, 0
AVX_INSTR packusdw, sse4, 0, 0, 0
AVX_INSTR paddb, mmx, 0, 0, 1
AVX_INSTR paddw, mmx, 0, 0, 1
AVX_INSTR paddd, mmx, 0, 0, 1
AVX_INSTR paddq, sse2, 0, 0, 1
AVX_INSTR paddsb, mmx, 0, 0, 1
AVX_INSTR paddsw, mmx, 0, 0, 1
AVX_INSTR paddusb, mmx, 0, 0, 1
AVX_INSTR paddusw, mmx, 0, 0, 1
AVX_INSTR palignr, ssse3
AVX_INSTR pand, mmx, 0, 0, 1
AVX_INSTR pandn, mmx, 0, 0, 0
AVX_INSTR pavgb, mmx2, 0, 0, 1
AVX_INSTR pavgw, mmx2, 0, 0, 1
AVX_INSTR pblendvb, sse4, 0, 0, 0
AVX_INSTR pblendw, sse4
AVX_INSTR pclmulqdq
AVX_INSTR pcmpestri, sse42
AVX_INSTR pcmpestrm, sse42
AVX_INSTR pcmpistri, sse42
AVX_INSTR pcmpistrm, sse42
AVX_INSTR pcmpeqb, mmx, 0, 0, 1
AVX_INSTR pcmpeqw, mmx, 0, 0, 1
AVX_INSTR pcmpeqd, mmx, 0, 0, 1
AVX_INSTR pcmpeqq, sse4, 0, 0, 1
AVX_INSTR pcmpgtb, mmx, 0, 0, 0
AVX_INSTR pcmpgtw, mmx, 0, 0, 0
AVX_INSTR pcmpgtd, mmx, 0, 0, 0
AVX_INSTR pcmpgtq, sse42, 0, 0, 0
AVX_INSTR pextrb, sse4
AVX_INSTR pextrd, sse4
AVX_INSTR pextrq, sse4
AVX_INSTR pextrw, mmx2
AVX_INSTR phaddw, ssse3, 0, 0, 0
AVX_INSTR phaddd, ssse3, 0, 0, 0
AVX_INSTR phaddsw, ssse3, 0, 0, 0
AVX_INSTR phminposuw, sse4
AVX_INSTR phsubw, ssse3, 0, 0, 0
AVX_INSTR phsubd, ssse3, 0, 0, 0
AVX_INSTR phsubsw, ssse3, 0, 0, 0
AVX_INSTR pinsrb, sse4
AVX_INSTR pinsrd, sse4
AVX_INSTR pinsrq, sse4
AVX_INSTR pinsrw, mmx2
AVX_INSTR pmaddwd, mmx, 0, 0, 1
AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
AVX_INSTR pmaxsb, sse4, 0, 0, 1
AVX_INSTR pmaxsw, mmx2, 0, 0, 1
AVX_INSTR pmaxsd, sse4, 0, 0, 1
AVX_INSTR pmaxub, mmx2, 0, 0, 1
AVX_INSTR pmaxuw, sse4, 0, 0, 1
AVX_INSTR pmaxud, sse4, 0, 0, 1
AVX_INSTR pminsb, sse4, 0, 0, 1
AVX_INSTR pminsw, mmx2, 0, 0, 1
AVX_INSTR pminsd, sse4, 0, 0, 1
AVX_INSTR pminub, mmx2, 0, 0, 1
AVX_INSTR pminuw, sse4, 0, 0, 1
AVX_INSTR pminud, sse4, 0, 0, 1
AVX_INSTR pmovmskb, mmx2
AVX_INSTR pmovsxbw, sse4
AVX_INSTR pmovsxbd, sse4
AVX_INSTR pmovsxbq, sse4
AVX_INSTR pmovsxwd, sse4
AVX_INSTR pmovsxwq, sse4
AVX_INSTR pmovsxdq, sse4
AVX_INSTR pmovzxbw, sse4
AVX_INSTR pmovzxbd, sse4
AVX_INSTR pmovzxbq, sse4
AVX_INSTR pmovzxwd, sse4
AVX_INSTR pmovzxwq, sse4
AVX_INSTR pmovzxdq, sse4
AVX_INSTR pmuldq, sse4, 0, 0, 1
AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
AVX_INSTR pmulhuw, mmx2, 0, 0, 1
AVX_INSTR pmulhw, mmx, 0, 0, 1
AVX_INSTR pmullw, mmx, 0, 0, 1
AVX_INSTR pmulld, sse4, 0, 0, 1
AVX_INSTR pmuludq, sse2, 0, 0, 1
AVX_INSTR por, mmx, 0, 0, 1
AVX_INSTR psadbw, mmx2, 0, 0, 1
AVX_INSTR pshufb, ssse3, 0, 0, 0
AVX_INSTR pshufd, sse2
AVX_INSTR pshufhw, sse2
AVX_INSTR pshuflw, sse2
AVX_INSTR psignb, ssse3, 0, 0, 0
AVX_INSTR psignw, ssse3, 0, 0, 0
AVX_INSTR psignd, ssse3, 0, 0, 0
AVX_INSTR psllw, mmx, 0, 0, 0
AVX_INSTR pslld, mmx, 0, 0, 0
AVX_INSTR psllq, mmx, 0, 0, 0
AVX_INSTR pslldq, sse2, 0, 0, 0
AVX_INSTR psraw, mmx, 0, 0, 0
AVX_INSTR psrad, mmx, 0, 0, 0
AVX_INSTR psrlw, mmx, 0, 0, 0
AVX_INSTR psrld, mmx, 0, 0, 0
AVX_INSTR psrlq, mmx, 0, 0, 0
AVX_INSTR psrldq, sse2, 0, 0, 0
AVX_INSTR psubb, mmx, 0, 0, 0
AVX_INSTR psubw, mmx, 0, 0, 0
AVX_INSTR psubd, mmx, 0, 0, 0
AVX_INSTR psubq, sse2, 0, 0, 0
AVX_INSTR psubsb, mmx, 0, 0, 0
AVX_INSTR psubsw, mmx, 0, 0, 0
AVX_INSTR psubusb, mmx, 0, 0, 0
AVX_INSTR psubusw, mmx, 0, 0, 0
AVX_INSTR ptest, sse4
AVX_INSTR punpckhbw, mmx, 0, 0, 0
AVX_INSTR punpckhwd, mmx, 0, 0, 0
AVX_INSTR punpckhdq, mmx, 0, 0, 0
AVX_INSTR punpckhqdq, sse2, 0, 0, 0
AVX_INSTR punpcklbw, mmx, 0, 0, 0
AVX_INSTR punpcklwd, mmx, 0, 0, 0
AVX_INSTR punpckldq, mmx, 0, 0, 0
AVX_INSTR punpcklqdq, sse2, 0, 0, 0
AVX_INSTR pxor, mmx, 0, 0, 1
AVX_INSTR rcpps, sse, 1, 0, 0
AVX_INSTR rcpss, sse, 1, 0, 0
AVX_INSTR roundpd, sse4
AVX_INSTR roundps, sse4
AVX_INSTR roundsd, sse4
AVX_INSTR roundss, sse4
AVX_INSTR rsqrtps, sse, 1, 0, 0
AVX_INSTR rsqrtss, sse, 1, 0, 0
AVX_INSTR shufpd, sse2, 1, 1, 0
AVX_INSTR shufps, sse, 1, 1, 0
AVX_INSTR sqrtpd, sse2, 1, 0, 0
AVX_INSTR sqrtps, sse, 1, 0, 0
AVX_INSTR sqrtsd, sse2, 1, 0, 0
AVX_INSTR sqrtss, sse, 1, 0, 0
AVX_INSTR stmxcsr, sse
AVX_INSTR subpd, sse2, 1, 0, 0
AVX_INSTR subps, sse, 1, 0, 0
AVX_INSTR subsd, sse2, 1, 0, 0
AVX_INSTR subss, sse, 1, 0, 0
AVX_INSTR ucomisd, sse2
AVX_INSTR ucomiss, sse
AVX_INSTR unpckhpd, sse2, 1, 0, 0
AVX_INSTR unpckhps, sse, 1, 0, 0
AVX_INSTR unpcklpd, sse2, 1, 0, 0
AVX_INSTR unpcklps, sse, 1, 0, 0
AVX_INSTR xorpd, sse2, 1, 0, 1
AVX_INSTR xorps, sse, 1, 0, 1
; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
AVX_INSTR pfadd, 3dnow, 1, 0, 1
AVX_INSTR pfsub, 3dnow, 1, 0, 0
AVX_INSTR pfmul, 3dnow, 1, 0, 1
; base-4 constants for shuffles
%assign i 0
%rep 256
%assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
%if j < 10
CAT_XDEFINE q000, j, i
%elif j < 100
CAT_XDEFINE q00, j, i
%elif j < 1000
CAT_XDEFINE q0, j, i
%else
CAT_XDEFINE q, j, i
%endif
%assign i i+1
%endrep
%undef i
%undef j
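; For example (illustrative): q3120 expands to the immediate with base-4
; digits 3,1,2,0, i.e. (3<<6)|(1<<4)|(2<<2)|0 = 0xD8, so
;     pshufd m0, m1, q3120
; selects source dwords 0,2,1,3 (the digit for each destination element,
; low to high, is read right to left).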
%macro FMA_INSTR 3
%macro %1 4-7 %1, %2, %3
%if cpuflag(xop)
v%5 %1, %2, %3, %4
%elifnidn %1, %4
%6 %1, %2, %3
%7 %1, %4
%else
%error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
%endif
%endmacro
%endmacro
FMA_INSTR pmacsww, pmullw, paddw
FMA_INSTR pmacsdd, pmulld, paddd ; sse4 emulation
FMA_INSTR pmacsdql, pmuldq, paddq ; sse4 emulation
FMA_INSTR pmadcswd, pmaddwd, paddd
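; For example (illustrative):
;     pmacsww m0, m1, m2, m3
; emits the single XOP instruction "vpmacsww xmm0, xmm1, xmm2, xmm3" when XOP
; is enabled, and is otherwise emulated as "pmullw m0, m1, m2" followed by
; "paddw m0, m3" (which requires dst to differ from the addend, m3 here).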
; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf
; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
; FMA3 is only possible if dst is the same as one of the src registers.
; Either src2 or src3 can be a memory operand.
%macro FMA4_INSTR 2-*
%push fma4_instr
%xdefine %$prefix %1
%rep %0 - 1
%macro %$prefix%2 4-6 %$prefix, %2
%if notcpuflag(fma3) && notcpuflag(fma4)
%error use of ``%5%6'' fma instruction in cpuname function: current_function
%elif cpuflag(fma4)
v%5%6 %1, %2, %3, %4
%elifidn %1, %2
; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
%ifid %3
v%{5}213%6 %2, %3, %4
%else
v%{5}132%6 %2, %4, %3
%endif
%elifidn %1, %3
v%{5}213%6 %3, %2, %4
%elifidn %1, %4
v%{5}231%6 %4, %2, %3
%else
%error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported
%endif
%endmacro
%rotate 1
%endrep
%pop
%endmacro
FMA4_INSTR fmadd, pd, ps, sd, ss
FMA4_INSTR fmaddsub, pd, ps
FMA4_INSTR fmsub, pd, ps, sd, ss
FMA4_INSTR fmsubadd, pd, ps
FMA4_INSTR fnmadd, pd, ps, sd, ss
FMA4_INSTR fnmsub, pd, ps, sd, ss
; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0)
%ifdef __YASM_VER__
%if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0
%macro vpbroadcastq 2
%if sizeof%1 == 16
movddup %1, %2
%else
vbroadcastsd %1, %2
%endif
%endmacro
%endif
%endif