/*
 * Loongson SIMD utils
 *
 * Copyright (c) 2016 Loongson Technology Corporation Limited
 * Copyright (c) 2016 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_MIPS_MMIUTILS_H
#define AVUTIL_MIPS_MMIUTILS_H

#include "config.h"
#include "libavutil/mips/asmdefs.h"
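
/*
 * Two implementations of the MMI_* load/store helpers follow: a generic one
 * for Loongson-2 cores and one using the Loongson-3 gs* extension
 * instructions.  Macros that need a C-level scratch variable come with a
 * matching DECLARE_VAR_* declaration and RESTRICT_ASM_* operand entry: the
 * former goes among the local declarations of the calling function, the
 * latter among the output operands of its __asm__ block.  An illustrative
 * sketch (operand and variable names are examples, not taken from this file):
 *
 *     double ftmp0;
 *     DECLARE_VAR_ALL64;
 *
 *     __asm__ volatile (
 *         MMI_ULDC1(%[ftmp0], %[src], 0x00)
 *         MMI_SDC1(%[ftmp0], %[dst], 0x00)
 *         : [ftmp0]"=&f"(ftmp0),
 *           RESTRICT_ASM_ALL64
 *           [dst]"+&r"(dst)
 *         : [src]"r"(src)
 *         : "memory"
 *     );
 */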

#if HAVE_LOONGSON2

#define DECLARE_VAR_LOW32       int32_t low32
#define RESTRICT_ASM_LOW32      [low32]"=&r"(low32),
#define DECLARE_VAR_ALL64       int64_t all64
#define RESTRICT_ASM_ALL64      [all64]"=&r"(all64),
#define DECLARE_VAR_ADDRT       mips_reg addrt
#define RESTRICT_ASM_ADDRT      [addrt]"=&r"(addrt),
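
/*
 * Indexed GPR loads/stores: the effective address is first formed in the
 * addrt scratch register with PTR_ADDU (the pointer-sized addu from
 * asmdefs.h) before a plain load/store with the bias as displacement.
 */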
#define MMI_LWX(reg, addr, stride, bias)                        \
    PTR_ADDU  "%[addrt],  "#addr",   "#stride"      \n\t"       \
    "lw       "#reg",     "#bias"(%[addrt])         \n\t"

#define MMI_SWX(reg, addr, stride, bias)                        \
    PTR_ADDU  "%[addrt],  "#addr",   "#stride"      \n\t"       \
    "sw       "#reg",     "#bias"(%[addrt])         \n\t"

#define MMI_LDX(reg, addr, stride, bias)                        \
    PTR_ADDU  "%[addrt],  "#addr",   "#stride"      \n\t"       \
    "ld       "#reg",     "#bias"(%[addrt])         \n\t"

#define MMI_SDX(reg, addr, stride, bias)                        \
    PTR_ADDU  "%[addrt],  "#addr",   "#stride"      \n\t"       \
    "sd       "#reg",     "#bias"(%[addrt])         \n\t"
#define MMI_LWC1(fp, addr, bias)                                \
    "lwc1     "#fp",      "#bias"("#addr")          \n\t"

#define MMI_ULWC1(fp, addr, bias)                               \
    "ulw      %[low32],   "#bias"("#addr")          \n\t"       \
    "mtc1     %[low32],   "#fp"                     \n\t"

#define MMI_LWXC1(fp, addr, stride, bias)                       \
    PTR_ADDU  "%[addrt],  "#addr",   "#stride"      \n\t"       \
    MMI_LWC1(fp, %[addrt], bias)

#define MMI_SWC1(fp, addr, bias)                                \
    "swc1     "#fp",      "#bias"("#addr")          \n\t"

#define MMI_USWC1(fp, addr, bias)                               \
    "mfc1     %[low32],   "#fp"                     \n\t"       \
    "usw      %[low32],   "#bias"("#addr")          \n\t"

#define MMI_SWXC1(fp, addr, stride, bias)                       \
    PTR_ADDU  "%[addrt],  "#addr",   "#stride"      \n\t"       \
    MMI_SWC1(fp, %[addrt], bias)

#define MMI_LDC1(fp, addr, bias)                                \
    "ldc1     "#fp",      "#bias"("#addr")          \n\t"

#define MMI_ULDC1(fp, addr, bias)                               \
    "uld      %[all64],   "#bias"("#addr")          \n\t"       \
    "dmtc1    %[all64],   "#fp"                     \n\t"

#define MMI_LDXC1(fp, addr, stride, bias)                       \
    PTR_ADDU  "%[addrt],  "#addr",   "#stride"      \n\t"       \
    MMI_LDC1(fp, %[addrt], bias)

#define MMI_SDC1(fp, addr, bias)                                \
    "sdc1     "#fp",      "#bias"("#addr")          \n\t"

#define MMI_USDC1(fp, addr, bias)                               \
    "dmfc1    %[all64],   "#fp"                     \n\t"       \
    "usd      %[all64],   "#bias"("#addr")          \n\t"

#define MMI_SDXC1(fp, addr, stride, bias)                       \
    PTR_ADDU  "%[addrt],  "#addr",   "#stride"      \n\t"       \
    MMI_SDC1(fp, %[addrt], bias)
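
/* 128-bit loads/stores: on Loongson-2 these are simply two 64-bit accesses. */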
#define MMI_LQ(reg1, reg2, addr, bias)                          \
    "ld       "#reg1",    "#bias"("#addr")          \n\t"       \
    "ld       "#reg2",    8+"#bias"("#addr")        \n\t"

#define MMI_SQ(reg1, reg2, addr, bias)                          \
    "sd       "#reg1",    "#bias"("#addr")          \n\t"       \
    "sd       "#reg2",    8+"#bias"("#addr")        \n\t"

#define MMI_LQC1(fp1, fp2, addr, bias)                          \
    "ldc1     "#fp1",     "#bias"("#addr")          \n\t"       \
    "ldc1     "#fp2",     8+"#bias"("#addr")        \n\t"

#define MMI_SQC1(fp1, fp2, addr, bias)                          \
    "sdc1     "#fp1",     "#bias"("#addr")          \n\t"       \
    "sdc1     "#fp2",     8+"#bias"("#addr")        \n\t"

#elif HAVE_LOONGSON3 /* !HAVE_LOONGSON2 */

#define DECLARE_VAR_ALL64
#define RESTRICT_ASM_ALL64
#define DECLARE_VAR_ADDRT
#define RESTRICT_ASM_ADDRT
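
/*
 * Loongson-3 provides gs* extension instructions with indexed and unaligned
 * addressing, so no GPR scratch or address temporary is needed and the
 * corresponding DECLARE_VAR_ALL64/ADDRT and RESTRICT_ASM_ALL64/ADDRT macros
 * expand to nothing.
 */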
#define MMI_LWX(reg, addr, stride, bias)                        \
    "gslwx    "#reg",     "#bias"("#addr", "#stride")   \n\t"

#define MMI_SWX(reg, addr, stride, bias)                        \
    "gsswx    "#reg",     "#bias"("#addr", "#stride")   \n\t"

#define MMI_LDX(reg, addr, stride, bias)                        \
    "gsldx    "#reg",     "#bias"("#addr", "#stride")   \n\t"

#define MMI_SDX(reg, addr, stride, bias)                        \
    "gssdx    "#reg",     "#bias"("#addr", "#stride")   \n\t"

#define MMI_LWC1(fp, addr, bias)                                \
    "lwc1     "#fp",      "#bias"("#addr")          \n\t"
#if _MIPS_SIM == _ABIO32 /* workaround for 3A2000 gslwlc1 bug */

#define DECLARE_VAR_LOW32       int32_t low32
#define RESTRICT_ASM_LOW32      [low32]"=&r"(low32),

#define MMI_ULWC1(fp, addr, bias)                               \
    "ulw      %[low32],   "#bias"("#addr")          \n\t"       \
    "mtc1     %[low32],   "#fp"                     \n\t"

#else /* _MIPS_SIM != _ABIO32 */

#define DECLARE_VAR_LOW32
#define RESTRICT_ASM_LOW32

#define MMI_ULWC1(fp, addr, bias)                               \
    "gslwlc1  "#fp",      3+"#bias"("#addr")        \n\t"       \
    "gslwrc1  "#fp",      "#bias"("#addr")          \n\t"

#endif /* _MIPS_SIM != _ABIO32 */

#define MMI_LWXC1(fp, addr, stride, bias)                       \
    "gslwxc1  "#fp",      "#bias"("#addr", "#stride")   \n\t"

#define MMI_SWC1(fp, addr, bias)                                \
    "swc1     "#fp",      "#bias"("#addr")          \n\t"

#define MMI_USWC1(fp, addr, bias)                               \
    "gsswlc1  "#fp",      3+"#bias"("#addr")        \n\t"       \
    "gsswrc1  "#fp",      "#bias"("#addr")          \n\t"

#define MMI_SWXC1(fp, addr, stride, bias)                       \
    "gsswxc1  "#fp",      "#bias"("#addr", "#stride")   \n\t"

#define MMI_LDC1(fp, addr, bias)                                \
    "ldc1     "#fp",      "#bias"("#addr")          \n\t"

#define MMI_ULDC1(fp, addr, bias)                               \
    "gsldlc1  "#fp",      7+"#bias"("#addr")        \n\t"       \
    "gsldrc1  "#fp",      "#bias"("#addr")          \n\t"

#define MMI_LDXC1(fp, addr, stride, bias)                       \
    "gsldxc1  "#fp",      "#bias"("#addr", "#stride")   \n\t"

#define MMI_SDC1(fp, addr, bias)                                \
    "sdc1     "#fp",      "#bias"("#addr")          \n\t"

#define MMI_USDC1(fp, addr, bias)                               \
    "gssdlc1  "#fp",      7+"#bias"("#addr")        \n\t"       \
    "gssdrc1  "#fp",      "#bias"("#addr")          \n\t"

#define MMI_SDXC1(fp, addr, stride, bias)                       \
    "gssdxc1  "#fp",      "#bias"("#addr", "#stride")   \n\t"

#define MMI_LQ(reg1, reg2, addr, bias)                          \
    "gslq     "#reg1",    "#reg2",   "#bias"("#addr")   \n\t"

#define MMI_SQ(reg1, reg2, addr, bias)                          \
    "gssq     "#reg1",    "#reg2",   "#bias"("#addr")   \n\t"

#define MMI_LQC1(fp1, fp2, addr, bias)                          \
    "gslqc1   "#fp1",     "#fp2",    "#bias"("#addr")   \n\t"

#define MMI_SQC1(fp1, fp2, addr, bias)                          \
    "gssqc1   "#fp1",     "#fp2",    "#bias"("#addr")   \n\t"

#endif /* HAVE_LOONGSON2 */
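
/*
 * TRANSPOSE_4H transposes a 4x4 matrix of 16-bit elements held one row per
 * 64-bit register in m1..m4, writing the result back into m1..m4.
 * t1..t5 are clobbered as temporaries, zero is cleared, r1 is a GPR scratch
 * and shift receives the pshufh control word 0x93.
 */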
#define TRANSPOSE_4H(m1, m2, m3, m4, t1, t2, t3, t4, t5, r1, zero, shift)   \
    "li        "#r1",     0x93                      \n\t"       \
    "xor       "#zero",   "#zero",   "#zero"        \n\t"       \
    "mtc1      "#r1",     "#shift"                  \n\t"       \
    "punpcklhw "#t1",     "#m1",     "#zero"        \n\t"       \
    "punpcklhw "#t5",     "#m2",     "#zero"        \n\t"       \
    "pshufh    "#t5",     "#t5",     "#shift"       \n\t"       \
    "or        "#t1",     "#t1",     "#t5"          \n\t"       \
    "punpckhhw "#t2",     "#m1",     "#zero"        \n\t"       \
    "punpckhhw "#t5",     "#m2",     "#zero"        \n\t"       \
    "pshufh    "#t5",     "#t5",     "#shift"       \n\t"       \
    "or        "#t2",     "#t2",     "#t5"          \n\t"       \
    "punpcklhw "#t3",     "#m3",     "#zero"        \n\t"       \
    "punpcklhw "#t5",     "#m4",     "#zero"        \n\t"       \
    "pshufh    "#t5",     "#t5",     "#shift"       \n\t"       \
    "or        "#t3",     "#t3",     "#t5"          \n\t"       \
    "punpckhhw "#t4",     "#m3",     "#zero"        \n\t"       \
    "punpckhhw "#t5",     "#m4",     "#zero"        \n\t"       \
    "pshufh    "#t5",     "#t5",     "#shift"       \n\t"       \
    "or        "#t4",     "#t4",     "#t5"          \n\t"       \
    "punpcklwd "#m1",     "#t1",     "#t3"          \n\t"       \
    "punpckhwd "#m2",     "#t1",     "#t3"          \n\t"       \
    "punpcklwd "#m3",     "#t2",     "#t4"          \n\t"       \
    "punpckhwd "#m4",     "#t2",     "#t4"          \n\t"
#define PSRAH_4_MMI(fp1, fp2, fp3, fp4, shift)                  \
    "psrah     "#fp1",    "#fp1",    "#shift"       \n\t"       \
    "psrah     "#fp2",    "#fp2",    "#shift"       \n\t"       \
    "psrah     "#fp3",    "#fp3",    "#shift"       \n\t"       \
    "psrah     "#fp4",    "#fp4",    "#shift"       \n\t"

#define PSRAH_8_MMI(fp1, fp2, fp3, fp4, fp5, fp6, fp7, fp8, shift)          \
    PSRAH_4_MMI(fp1, fp2, fp3, fp4, shift)                                  \
    PSRAH_4_MMI(fp5, fp6, fp7, fp8, shift)

#endif /* AVUTIL_MIPS_MMIUTILS_H */