/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "dsputil_alpha.h"
#include "asm.h"
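
/* Read one 8x8 block of 8-bit pixels and widen each byte into a 16-bit
   DCTELEM.  unpkbw (MVI byte unpack) zero-extends the low four bytes of a
   quadword into four 16-bit lanes, so each row takes two unpack/store pairs. */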
void get_pixels_mvi(DCTELEM *restrict block,
                    const uint8_t *restrict pixels, int line_size)
{
    int h = 8;

    do {
        uint64_t p;

        p = ldq(pixels);
        stq(unpkbw(p),       block);
        stq(unpkbw(p >> 32), block + 4);

        pixels += line_size;
        block  += 8;
    } while (--h);
}
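
/* Compute the byte-wise difference s1 - s2 as 16-bit DCTELEMs.  cmpbge
   records which bytes of s1 are >= the corresponding bytes of s2; the
   zap/s4addq pair cancels the borrows that the plain 64-bit subtraction
   carried across byte boundaries, and the 0xff "signs" bytes sign-extend
   each negative 8-bit difference when merged into the high halves. */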
void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
                     int stride)
{
    int h = 8;
    uint64_t mask = 0x4040;

    mask |= mask << 16;
    mask |= mask << 32;
    do {
        uint64_t x, y, c, d, a;
        uint64_t signs;

        x = ldq(s1);
        y = ldq(s2);
        c = cmpbge(x, y);
        d = x - y;
        a = zap(mask, c);       /* We use 0x4040404040404040 here... */
        d += 4 * a;             /* ...so we can use s4addq here.     */
        signs = zap(-1, c);

        stq(unpkbw(d)       | (unpkbw(signs)       << 8), block);
        stq(unpkbw(d >> 32) | (unpkbw(signs >> 32) << 8), block + 4);

        s1    += stride;
        s2    += stride;
        block += 8;
    } while (--h);
}
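
/* Byte-wise averages that cannot overflow into the neighbouring byte:
   avg2() uses (a + b + 1) / 2 == (a | b) - ((a ^ b) >> 1), masking with
   0xfe so the shift does not leak bits across byte lanes; avg4() sums the
   high six and low two bits of each byte separately, yielding the rounded
   (l1 + l2 + l3 + l4 + 2) / 4 per byte. */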
static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}

static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
                + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2)
                + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = ((  (l1 & BYTE_VEC(0x03))
                    + (l2 & BYTE_VEC(0x03))
                    + (l3 & BYTE_VEC(0x03))
                    + (l4 & BYTE_VEC(0x03))
                    + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}
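
/* Sum of absolute differences over an 8-pixel-wide block: perr (MVI pixel
   error) accumulates |a_i - b_i| over the eight byte pairs of two quadwords
   in a single operation; the first branch handles an unaligned pix2 via
   uldq(). */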
int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do {                    /* do 8 pixels at a time */
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = uldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    } else {
        do {
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = ldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    }

    return result;
}

#if 0                           /* now done in assembly */
int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
{
    int result = 0;
    int h = 16;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do {                    /* do 16 pixels at a time */
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t t;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            t    = ldq_u(pix2 + 8);
            p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    } else {
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            p2_l = ldq(pix2);
            p2_r = ldq(pix2 + 8);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    }

    return result;
}
#endif
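
/* SAD against the reference block shifted by half a pixel in x: every byte
   of pix2 is averaged with its right-hand neighbour before perr().  The
   switch separates the aligned case, the disalign == 7 case (where
   disalign + 1 would be treated as 0 by extqh), and the general unaligned
   case, which builds both byte-shifted views from three ldq_u() loads. */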
int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;
    uint64_t disalign = (size_t) pix2 & 0x7;

    switch (disalign) {
    case 0:
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq(pix2);
            r    = ldq(pix2 + 8);
            p2_l = avg2(l, (l >> 8) | ((uint64_t) r << 56));
            p2_r = avg2(r, (r >> 8) | ((uint64_t) pix2[16] << 56));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    case 7:
        /* |.......l|lllllllr|rrrrrrr*|
           This case is special because disalign1 would be 8, which
           gets treated as 0 by extqh.  At least it is a bit faster
           that way :) */
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign) | extqh(m, disalign), m);
            p2_r = avg2(extql(m, disalign) | extqh(r, disalign), r);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    default:
        do {
            uint64_t disalign1 = disalign + 1;
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign)  | extqh(m, disalign),
                        extql(l, disalign1) | extqh(m, disalign1));
            p2_r = avg2(extql(m, disalign)  | extqh(r, disalign),
                        extql(m, disalign1) | extqh(r, disalign1));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    }
    return result;
}
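
/* SAD against the reference block shifted by half a pixel in y: each
   reference row is averaged with the row below it.  The previous row is
   kept in p2_l/p2_r so every row of pix2 is loaded only once. */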
int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        uint64_t t, p2_l, p2_r;

        t    = ldq_u(pix2 + 8);
        p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
        p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;
            uint64_t t;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            t     = ldq_u(pix2 + 8);
            np2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            np2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l  = np2_l;
            p2_r  = np2_r;
        } while (--h);
    } else {
        uint64_t p2_l, p2_r;

        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);

        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l  = np2_l;
            p2_r  = np2_r;
        } while (--h);
    }
    return result;
}
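
/* SAD against the reference block shifted by half a pixel in both x and y:
   avg4() combines each reference byte with its right, lower and lower-right
   neighbours.  p2_x carries the 17th byte of the current row, already
   shifted into the top byte, so (p2_r >> 8) | p2_x is the right-shifted
   view of the second quadword; rows are reused as in the y2 case. */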
int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    uint64_t p1_l, p1_r;
    uint64_t p2_l, p2_r, p2_x;

    p1_l = ldq(pix1);
    p1_r = ldq(pix1 + 8);

    if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
        p2_l = uldq(pix2);
        p2_r = uldq(pix2 + 8);
        p2_x = (uint64_t) pix2[16] << 56;
    } else {
        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);
        p2_x = ldq(pix2 + 16) << 56;
    }

    do {
        uint64_t np1_l, np1_r;
        uint64_t np2_l, np2_r, np2_x;

        pix1 += line_size;
        pix2 += line_size;

        np1_l = ldq(pix1);
        np1_r = ldq(pix1 + 8);

        if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
            np2_l = uldq(pix2);
            np2_r = uldq(pix2 + 8);
            np2_x = (uint64_t) pix2[16] << 56;
        } else {
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);
            np2_x = ldq(pix2 + 16) << 56;
        }

        result += perr(p1_l,
                       avg4( p2_l, ( p2_l >> 8) | ((uint64_t)  p2_r << 56),
                            np2_l, (np2_l >> 8) | ((uint64_t) np2_r << 56)))
                + perr(p1_r,
                       avg4( p2_r, ( p2_r >> 8) | ((uint64_t)  p2_x),
                            np2_r, (np2_r >> 8) | ((uint64_t) np2_x)));

        p1_l = np1_l;
        p1_r = np1_r;
        p2_l = np2_l;
        p2_r = np2_r;
        p2_x = np2_x;
    } while (--h);

    return result;
}