You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

209 lines
8.0KB

/*
 * Copyright (c) 2008 Siarhei Siamashka <ssvb@users.sourceforge.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
/*
 * VFP is a floating point coprocessor used in some ARM cores. VFP11 has 1 cycle
 * throughput for almost all the instructions (except for double precision
 * arithmetic), but rather high latency. Latency is 4 cycles for loads and 8 cycles
 * for arithmetic operations. Scheduling code to avoid pipeline stalls is very
 * important for performance. One more interesting feature is that VFP has
 * independent load/store and arithmetic pipelines, so it is possible to make
 * them work simultaneously and get more than 1 operation per cycle. The load/store
 * pipeline can process 2 single precision floating point values per cycle and
 * supports bulk loads and stores for large sets of registers. Arithmetic operations
 * can be done on vectors, which allows keeping the arithmetic pipeline busy
 * while the processor may issue and execute other instructions. Detailed
 * optimization manuals can be found at http://www.arm.com
 */
/**
 * ARM VFP optimized implementation of 'vector_fmul_c' function.
 * Element-wise in-place multiply: dst[i] *= src[i] for i in [0, len).
 * Assume that len is a positive number and is multiple of 8
 *
 * The loop is software-pipelined: 16 elements are in flight per iteration,
 * with the 'ge'/'gt' conditional loads and multiplies preparing the next
 * batches while the unconditional stores drain the current one, so the
 * load/store and arithmetic pipelines overlap.
 */
static void vector_fmul_vfp(float *dst, const float *src, int len)
{
    int tmp;
    __asm__ volatile(
        /* Set FPSCR LEN field to 3 (short-vector length 4): each 'fmuls'
         * below then operates on 4 consecutive single-precision registers. */
        "fmrx %[tmp], fpscr\n\t"
        "orr %[tmp], %[tmp], #(3 << 16)\n\t" /* set vector size to 4 */
        "fmxr fpscr, %[tmp]\n\t"

        /* Prologue: preload first 8 dst elements into s0-s7 and first 8 src
         * elements into s8-s15, and kick off the first 4-wide multiply.
         * NOTE(review): dst_r and dst_w are the same pointer used as
         * independent read/write cursors over the same buffer. */
        "fldmias %[dst_r]!, {s0-s3}\n\t"
        "fldmias %[src]!, {s8-s11}\n\t"
        "fldmias %[dst_r]!, {s4-s7}\n\t"
        "fldmias %[src]!, {s12-s15}\n\t"
        "fmuls s8, s0, s8\n\t"
        "1:\n\t"
        /* Decrement by 16 and set flags; 'ge' ops run when >= 16 more
         * elements were pending, 'gt' ops when the loop will iterate again. */
        "subs %[len], %[len], #16\n\t"
        "fmuls s12, s4, s12\n\t"
        "fldmiasge %[dst_r]!, {s16-s19}\n\t"
        "fldmiasge %[src]!, {s24-s27}\n\t"
        "fldmiasge %[dst_r]!, {s20-s23}\n\t"
        "fldmiasge %[src]!, {s28-s31}\n\t"
        "fmulsge s24, s16, s24\n\t"
        /* Drain the first 8 finished products while loads proceed. */
        "fstmias %[dst_w]!, {s8-s11}\n\t"
        "fstmias %[dst_w]!, {s12-s15}\n\t"
        "fmulsge s28, s20, s28\n\t"
        /* Refill the s0-s15 banks for the following iteration. */
        "fldmiasgt %[dst_r]!, {s0-s3}\n\t"
        "fldmiasgt %[src]!, {s8-s11}\n\t"
        "fldmiasgt %[dst_r]!, {s4-s7}\n\t"
        "fldmiasgt %[src]!, {s12-s15}\n\t"
        "fmulsge s8, s0, s8\n\t"
        "fstmiasge %[dst_w]!, {s24-s27}\n\t"
        "fstmiasge %[dst_w]!, {s28-s31}\n\t"
        "bgt 1b\n\t"

        /* Restore scalar mode before returning to C code. */
        "bic %[tmp], %[tmp], #(7 << 16)\n\t" /* set vector size back to 1 */
        "fmxr fpscr, %[tmp]\n\t"
        : [dst_w] "+&r" (dst), [dst_r] "+&r" (dst), [src] "+&r" (src), [len] "+&r" (len), [tmp] "=&r" (tmp)
        :
        : "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
          "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
          "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
          "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
          "cc", "memory");
}
/**
 * ARM VFP optimized implementation of 'vector_fmul_reverse_c' function.
 * Computes dst[i] = src0[i] * src1[len - 1 - i] for i in [0, len).
 * Assume that len is a positive number and is multiple of 8
 *
 * src1 is advanced to its end and walked backwards with decrement-before
 * loads ('fldmdbs'), while src0 is walked forwards; scalar 'fmuls' pair the
 * registers in reversed order (e.g. s8 = s3 * s8). 16 elements are in flight
 * per iteration, with 'ge'/'gt' conditional ops pipelining the next batches.
 */
static void vector_fmul_reverse_vfp(float *dst, const float *src0, const float *src1, int len)
{
    src1 += len; /* start one past the last element; fldmdbs pre-decrements */
    __asm__ volatile(
        /* Prologue: s0-s7 hold src1[len-8..len-1] (loaded backwards in
         * blocks of 4), s8-s15 hold src0[0..7]. */
        "fldmdbs %[src1]!, {s0-s3}\n\t"
        "fldmias %[src0]!, {s8-s11}\n\t"
        "fldmdbs %[src1]!, {s4-s7}\n\t"
        "fldmias %[src0]!, {s12-s15}\n\t"
        /* Reversed pairing: src0[0]*src1[len-1], src0[1]*src1[len-2], ... */
        "fmuls s8, s3, s8\n\t"
        "fmuls s9, s2, s9\n\t"
        "fmuls s10, s1, s10\n\t"
        "fmuls s11, s0, s11\n\t"
        "1:\n\t"
        /* 'ge': at least 16 more elements were pending; 'gt': loop repeats. */
        "subs %[len], %[len], #16\n\t"
        "fldmdbsge %[src1]!, {s16-s19}\n\t"
        "fmuls s12, s7, s12\n\t"
        "fldmiasge %[src0]!, {s24-s27}\n\t"
        "fmuls s13, s6, s13\n\t"
        "fldmdbsge %[src1]!, {s20-s23}\n\t"
        "fmuls s14, s5, s14\n\t"
        "fldmiasge %[src0]!, {s28-s31}\n\t"
        "fmuls s15, s4, s15\n\t"
        "fmulsge s24, s19, s24\n\t"
        "fldmdbsgt %[src1]!, {s0-s3}\n\t"
        "fmulsge s25, s18, s25\n\t"
        /* Store finished products interleaved with the next batch's work. */
        "fstmias %[dst]!, {s8-s13}\n\t"
        "fmulsge s26, s17, s26\n\t"
        "fldmiasgt %[src0]!, {s8-s11}\n\t"
        "fmulsge s27, s16, s27\n\t"
        "fmulsge s28, s23, s28\n\t"
        "fldmdbsgt %[src1]!, {s4-s7}\n\t"
        "fmulsge s29, s22, s29\n\t"
        "fstmias %[dst]!, {s14-s15}\n\t"
        "fmulsge s30, s21, s30\n\t"
        "fmulsge s31, s20, s31\n\t"
        "fmulsge s8, s3, s8\n\t"
        "fldmiasgt %[src0]!, {s12-s15}\n\t"
        "fmulsge s9, s2, s9\n\t"
        "fmulsge s10, s1, s10\n\t"
        "fstmiasge %[dst]!, {s24-s27}\n\t"
        "fmulsge s11, s0, s11\n\t"
        "fstmiasge %[dst]!, {s28-s31}\n\t"
        "bgt 1b\n\t"
        : [dst] "+&r" (dst), [src0] "+&r" (src0), [src1] "+&r" (src1), [len] "+&r" (len)
        :
        : "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
          "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
          "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
          "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
          "cc", "memory");
}
  135. #ifdef HAVE_ARMV6
/**
 * ARM VFP optimized float to int16 conversion.
 * Assume that len is a positive number and is multiple of 8, destination
 * buffer is at least 4 bytes aligned (8 bytes alignment is better for
 * performance), little endian byte sex
 *
 * Each iteration converts 8 floats: 'ftosis' converts to signed int on the
 * VFP side, the results are moved to core registers ('fmrrs'), saturated to
 * 16 bits ('ssat'), packed pairwise into 32-bit words ('pkhbt', low halfword
 * first — hence the little-endian requirement), and stored with 'stmia'.
 * The 'gt' conditional loads/converts pipeline the next batch of 8.
 */
void float_to_int16_vfp(int16_t *dst, const float *src, int len)
{
    __asm__ volatile(
        /* Prologue: load and convert the first 8 samples. */
        "fldmias %[src]!, {s16-s23}\n\t"
        "ftosis s0, s16\n\t"
        "ftosis s1, s17\n\t"
        "ftosis s2, s18\n\t"
        "ftosis s3, s19\n\t"
        "ftosis s4, s20\n\t"
        "ftosis s5, s21\n\t"
        "ftosis s6, s22\n\t"
        "ftosis s7, s23\n\t"
        "1:\n\t"
        "subs %[len], %[len], #8\n\t"
        /* Move 8 converted ints into core registers r3-r8, ip, lr. */
        "fmrrs r3, r4, {s0, s1}\n\t"
        "fmrrs r5, r6, {s2, s3}\n\t"
        "fmrrs r7, r8, {s4, s5}\n\t"
        "fmrrs ip, lr, {s6, s7}\n\t"
        /* Start loading the next batch while the core pipeline saturates
         * and packs the current one. */
        "fldmiasgt %[src]!, {s16-s23}\n\t"
        "ssat r4, #16, r4\n\t"
        "ssat r3, #16, r3\n\t"
        "ssat r6, #16, r6\n\t"
        "ssat r5, #16, r5\n\t"
        /* Pack two saturated halfwords per 32-bit word, low sample in the
         * low halfword. */
        "pkhbt r3, r3, r4, lsl #16\n\t"
        "pkhbt r4, r5, r6, lsl #16\n\t"
        "ftosisgt s0, s16\n\t"
        "ftosisgt s1, s17\n\t"
        "ftosisgt s2, s18\n\t"
        "ftosisgt s3, s19\n\t"
        "ftosisgt s4, s20\n\t"
        "ftosisgt s5, s21\n\t"
        "ftosisgt s6, s22\n\t"
        "ftosisgt s7, s23\n\t"
        "ssat r8, #16, r8\n\t"
        "ssat r7, #16, r7\n\t"
        "ssat lr, #16, lr\n\t"
        "ssat ip, #16, ip\n\t"
        "pkhbt r5, r7, r8, lsl #16\n\t"
        "pkhbt r6, ip, lr, lsl #16\n\t"
        /* Write 16 bytes (8 int16 samples) in one bulk store. */
        "stmia %[dst]!, {r3-r6}\n\t"
        "bgt 1b\n\t"
        : [dst] "+&r" (dst), [src] "+&r" (src), [len] "+&r" (len)
        :
        : "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
          "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
          "r3", "r4", "r5", "r6", "r7", "r8", "ip", "lr",
          "cc", "memory");
}
  190. #endif
  191. void ff_float_init_arm_vfp(DSPContext* c, AVCodecContext *avctx)
  192. {
  193. c->vector_fmul = vector_fmul_vfp;
  194. c->vector_fmul_reverse = vector_fmul_reverse_vfp;
  195. #ifdef HAVE_ARMV6
  196. c->float_to_int16 = float_to_int16_vfp;
  197. #endif
  198. }