/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/*
 * NOTE: This code is based on GPL code from the libmpeg2 project. The
 * author, Michel Lespinasse, has given explicit permission to release
 * under LGPL as part of Libav.
 */
/*
 * Libav integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project. I've deleted all of the libmpeg2-specific code, renamed the
 * functions and reordered the function parameters. The only change to the
 * IDCT function itself was to factor out the partial transposition, and to
 * perform a full transpose at the end of the function.
 */
#include <stdlib.h>                                      /* malloc(), free() */
#include <string.h>

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavcodec/dsputil.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"
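
/*
 * Almost every multiply below is vec_mradds (vmhraddshs), which computes,
 * per signed 16-bit element, roughly this scalar operation:
 *
 *     static int16_t mradds_scalar(int16_t a, int16_t b, int16_t c)
 *     {
 *         int32_t r = ((a * b + 0x4000) >> 15) + c;
 *         return r > 32767 ? 32767 : r < -32768 ? -32768 : r;
 *     }
 *
 * i.e. a rounded Q15 fixed-point multiply followed by a saturating add,
 * so a single instruction implements one multiply-accumulate term of the
 * butterfly against a Q15 trigonometric constant.
 */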
#define IDCT_HALF                                       \
    /* 1st stage */                                     \
    t1 = vec_mradds (a1, vx7, vx1);                     \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));    \
    t7 = vec_mradds (a2, vx5, vx3);                     \
    t3 = vec_mradds (ma2, vx3, vx5);                    \
                                                        \
    /* 2nd stage */                                     \
    t5 = vec_adds (vx0, vx4);                           \
    t0 = vec_subs (vx0, vx4);                           \
    t2 = vec_mradds (a0, vx6, vx2);                     \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));    \
    t6 = vec_adds (t8, t3);                             \
    t3 = vec_subs (t8, t3);                             \
    t8 = vec_subs (t1, t7);                             \
    t1 = vec_adds (t1, t7);                             \
                                                        \
    /* 3rd stage */                                     \
    t7 = vec_adds (t5, t2);                             \
    t2 = vec_subs (t5, t2);                             \
    t5 = vec_adds (t0, t4);                             \
    t0 = vec_subs (t0, t4);                             \
    t4 = vec_subs (t8, t3);                             \
    t3 = vec_adds (t8, t3);                             \
                                                        \
    /* 4th stage */                                     \
    vy0 = vec_adds (t7, t1);                            \
    vy7 = vec_subs (t7, t1);                            \
    vy1 = vec_mradds (c4, t3, t5);                      \
    vy6 = vec_mradds (mc4, t3, t5);                     \
    vy2 = vec_mradds (c4, t4, t0);                      \
    vy5 = vec_mradds (mc4, t4, t0);                     \
    vy3 = vec_adds (t2, t6);                            \
    vy4 = vec_subs (t2, t6);
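
/*
 * IDCT_HALF is one pass of an 8-point 1-D IDCT. The butterflies combine
 * whole vectors element-wise, so the macro performs eight independent
 * 8-point transforms at once, one per 16-bit lane, taking vx0-vx7 to
 * vy0-vy7. The IDCT macro below runs it once per dimension, with a
 * transpose in between.
 */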
#define IDCT                                                            \
    vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                     \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                     \
    vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias;                       \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                         \
    vec_u16 shift;                                                      \
                                                                        \
    c4   = vec_splat (constants[0], 0);                                 \
    a0   = vec_splat (constants[0], 1);                                 \
    a1   = vec_splat (constants[0], 2);                                 \
    a2   = vec_splat (constants[0], 3);                                 \
    mc4  = vec_splat (constants[0], 4);                                 \
    ma2  = vec_splat (constants[0], 5);                                 \
    bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3);               \
                                                                        \
    zero  = vec_splat_s16 (0);                                          \
    shift = vec_splat_u16 (4);                                          \
                                                                        \
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    vx0 = vec_mergeh (vy0, vy4);                                        \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    vy0 = vec_mergeh (vx0, vx4);                                        \
    vy1 = vec_mergel (vx0, vx4);                                        \
    vy2 = vec_mergeh (vx1, vx5);                                        \
    vy3 = vec_mergel (vx1, vx5);                                        \
    vy4 = vec_mergeh (vx2, vx6);                                        \
    vy5 = vec_mergel (vx2, vx6);                                        \
    vy6 = vec_mergeh (vx3, vx7);                                        \
    vy7 = vec_mergel (vx3, vx7);                                        \
                                                                        \
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);                       \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    shift = vec_splat_u16 (6);                                          \
    vx0 = vec_sra (vy0, shift);                                         \
    vx1 = vec_sra (vy1, shift);                                         \
    vx2 = vec_sra (vy2, shift);                                         \
    vx3 = vec_sra (vy3, shift);                                         \
    vx4 = vec_sra (vy4, shift);                                         \
    vx5 = vec_sra (vy5, shift);                                         \
    vx6 = vec_sra (vy6, shift);                                         \
    vx7 = vec_sra (vy7, shift);
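
/*
 * Overall flow of IDCT: the input coefficients are shifted left by 4 and
 * prescaled by the per-frequency rows constants[1..4], one IDCT_HALF pass
 * transforms the first dimension, the 8x8 block is transposed with three
 * rounds of vec_mergeh/vec_mergel, the rounding bias is folded into vx0
 * (the DC term of the second pass, which contributes with weight one to
 * every output), a second IDCT_HALF pass transforms the other dimension,
 * and the results are scaled back down with an arithmetic shift right by
 * 6, so the bias makes that shift round instead of truncate.
 */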
static const vec_s16 constants[5] = {
    { 23170, 13573,  6518, 21895, -23170, -21895,    32,    31 },
    { 16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725 },
    { 22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521 },
    { 21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692 },
    { 19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722 }
};
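
/*
 * As far as I can tell, the fixed-point values decode as follows (to
 * within rounding, in case the tables ever need to be regenerated):
 *
 *     c4 = 23170 ~= cos(pi/4)   * 2^15      a0 = 13573 ~= tan(pi/8)   * 2^15
 *     a1 =  6518 ~= tan(pi/16)  * 2^15      a2 = 21895 ~= tan(3pi/16) * 2^15
 *
 * mc4 and ma2 are the negations of c4 and a2, and the trailing {32, 31}
 * pair is re-read as one 32-bit lane to build the rounding bias.
 * constants[1..4] are the prescale rows: entry (i, j) ~= 2^14 * S(i) * S(j)
 * with S(0) = 1, S(k) = sqrt(2) * cos(k*pi/16), and indices folded to
 * min(k, 8 - k).
 */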
void idct_put_altivec(uint8_t* dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;

    IDCT

#define COPY(dest,src)                                  \
    tmp = vec_packsu (src, src);                        \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);    \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);
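
    /*
     * vec_packsu (src, src) saturates the eight results to unsigned bytes
     * and replicates them in both halves of tmp, so the two 32-bit
     * vec_ste stores pick up the correct element for any 4-byte-aligned
     * dest; no 16-byte alignment of the destination is required.
     */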
    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)
}
void idct_add_altivec(uint8_t* dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

    IDCT
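
    /*
     * Build two permute masks that zero-extend the destination pixels to
     * 16 bits: vec_lvsl yields the byte indices for a load of dest that
     * need not be 16-byte aligned, and merging with 0xff bytes (which
     * select from the zero vector inside vec_perm) interleaves a zero
     * byte above each pixel. Two masks are needed because consecutive
     * rows can sit at different offsets within a 16-byte line.
     */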
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p  = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

#define ADD(dest,src,perm)                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */        \
    tmp = vec_ld (0, dest);                             \
    tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm); \
    tmp3 = vec_adds (tmp2, src);                        \
    tmp = vec_packsu (tmp3, tmp3);                      \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);    \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    ADD (dest, vx0, perm0)    dest += stride;
    ADD (dest, vx1, perm1)    dest += stride;
    ADD (dest, vx2, perm0)    dest += stride;
    ADD (dest, vx3, perm1)    dest += stride;
    ADD (dest, vx4, perm0)    dest += stride;
    ADD (dest, vx5, perm1)    dest += stride;
    ADD (dest, vx6, perm0)    dest += stride;
    ADD (dest, vx7, perm1)
}
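
/*
 * Nothing in this file registers the functions; the AltiVec DSP
 * initialization code is expected to hook them up. A rough sketch of what
 * that looks like (the exact init function and conditions live elsewhere
 * in dsputil_altivec):
 *
 *     if (avctx->idct_algo == FF_IDCT_ALTIVEC) {
 *         c->idct_put = idct_put_altivec;
 *         c->idct_add = idct_add_altivec;
 *     }
 */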