/*
 * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
 * Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * MMX optimized versions of (put|avg)_h264_chroma_mc8, _mc4 and _mc2.
 * H264_CHROMA_MC8_TMPL must be defined to the desired function name.
 * H264_CHROMA_OP must be defined to empty for put and to pavgb/pavgusb for avg.
 * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function.
 * H264_CHROMA_OP4 is the 4-byte counterpart of H264_CHROMA_OP, used by the
 * MC4 and MC2 templates below.
 */
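
/*
 * Illustrative instantiation (a sketch, not part of this file; the exact
 * function and helper names here are assumptions modeled on the pattern
 * used by FFmpeg's MMX DSP code):
 *
 *   #define H264_CHROMA_OP(S,D)
 *   #define H264_CHROMA_OP4(S,D,T)
 *   #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
 *   #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
 *   #define H264_CHROMA_MC8_MV0  put_pixels8_mmx
 *   #include "dsputil_h264_template_mmx.c"
 */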
static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, int rnd)
{
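    /* Three cases: a plain copy when x == y == 0, a 1-D filter when only one
     * of x,y is nonzero, and the full 2-D bilinear filter otherwise. */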
    const uint64_t *rnd_reg;
    DECLARE_ALIGNED_8(uint64_t, AA);
    DECLARE_ALIGNED_8(uint64_t, DD);
    int i;

    if (y == 0 && x == 0) {
        /* no filter needed */
        H264_CHROMA_MC8_MV0(dst, src, stride, h);
        return;
    }

    assert(x < 8 && y < 8 && x >= 0 && y >= 0);

    if (y == 0 || x == 0) {
        /* 1 dimensional filter only */
        const int dxy = x ? 1 : stride;

        rnd_reg = rnd ? &ff_pw_4 : &ff_pw_3;
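
        /* Only one of x,y is nonzero on this path, so x+y is the filter
         * coefficient B; A = 8 - B. */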
        __asm__ volatile(
            "movd %0, %%mm5\n\t"
            "movq %1, %%mm4\n\t"
            "movq %2, %%mm6\n\t"         /* mm6 = rnd */
            "punpcklwd %%mm5, %%mm5\n\t"
            "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */
            "pxor %%mm7, %%mm7\n\t"
            "psubw %%mm5, %%mm4\n\t"     /* mm4 = A = 8-x */
            :: "rm"(x+y), "m"(ff_pw_8), "m"(*rnd_reg));

        for (i = 0; i < h; i++) {
            __asm__ volatile(
                /* mm0 = src[0..7], mm2 = src[1..8] */
                "movq %0, %%mm0\n\t"
                "movq %1, %%mm2\n\t"
                :: "m"(src[0]), "m"(src[dxy]));

            __asm__ volatile(
                /* [mm0,mm1] = A * src[0..7] */
                /* [mm2,mm3] = B * src[1..8] */
                "movq %%mm0, %%mm1\n\t"
                "movq %%mm2, %%mm3\n\t"
                "punpcklbw %%mm7, %%mm0\n\t"
                "punpckhbw %%mm7, %%mm1\n\t"
                "punpcklbw %%mm7, %%mm2\n\t"
                "punpckhbw %%mm7, %%mm3\n\t"
                "pmullw %%mm4, %%mm0\n\t"
                "pmullw %%mm4, %%mm1\n\t"
                "pmullw %%mm5, %%mm2\n\t"
                "pmullw %%mm5, %%mm3\n\t"

                /* dst[0..7] = (A * src[0..7] + B * src[1..8] + 4) >> 3 */
                "paddw %%mm6, %%mm0\n\t"
                "paddw %%mm6, %%mm1\n\t"
                "paddw %%mm2, %%mm0\n\t"
                "paddw %%mm3, %%mm1\n\t"
                "psrlw $3, %%mm0\n\t"
                "psrlw $3, %%mm1\n\t"
                "packuswb %%mm1, %%mm0\n\t"
                H264_CHROMA_OP(%0, %%mm0)
                "movq %%mm0, %0\n\t"
                : "=m" (dst[0]));
            src += stride;
            dst += stride;
        }
        return;
    }
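
    /* General case: full 2-D bilinear filter. With A = (8-x)(8-y),
     * B = x(8-y), C = (8-x)y and D = x*y, each output pixel is
     *   dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1] + 32) >> 6,
     * with 28 in place of 32 as the rounding constant when rnd is 0. */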
  88. /* general case, bilinear */
  89. rnd_reg = rnd ? &ff_pw_32.a : &ff_pw_28.a;
  90. __asm__ volatile("movd %2, %%mm4\n\t"
  91. "movd %3, %%mm6\n\t"
  92. "punpcklwd %%mm4, %%mm4\n\t"
  93. "punpcklwd %%mm6, %%mm6\n\t"
  94. "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
  95. "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
  96. "movq %%mm4, %%mm5\n\t"
  97. "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */
  98. "psllw $3, %%mm5\n\t"
  99. "psllw $3, %%mm6\n\t"
  100. "movq %%mm5, %%mm7\n\t"
  101. "paddw %%mm6, %%mm7\n\t"
  102. "movq %%mm4, %1\n\t" /* DD = x * y */
  103. "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */
  104. "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */
  105. "paddw %4, %%mm4\n\t"
  106. "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */
  107. "pxor %%mm7, %%mm7\n\t"
  108. "movq %%mm4, %0\n\t"
  109. : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
  110. __asm__ volatile(
  111. /* mm0 = src[0..7], mm1 = src[1..8] */
  112. "movq %0, %%mm0\n\t"
  113. "movq %1, %%mm1\n\t"
  114. : : "m" (src[0]), "m" (src[1]));
  115. for(i=0; i<h; i++) {
  116. src += stride;
  117. __asm__ volatile(
  118. /* mm2 = A * src[0..3] + B * src[1..4] */
  119. /* mm3 = A * src[4..7] + B * src[5..8] */
  120. "movq %%mm0, %%mm2\n\t"
  121. "movq %%mm1, %%mm3\n\t"
  122. "punpckhbw %%mm7, %%mm0\n\t"
  123. "punpcklbw %%mm7, %%mm1\n\t"
  124. "punpcklbw %%mm7, %%mm2\n\t"
  125. "punpckhbw %%mm7, %%mm3\n\t"
  126. "pmullw %0, %%mm0\n\t"
  127. "pmullw %0, %%mm2\n\t"
  128. "pmullw %%mm5, %%mm1\n\t"
  129. "pmullw %%mm5, %%mm3\n\t"
  130. "paddw %%mm1, %%mm2\n\t"
  131. "paddw %%mm0, %%mm3\n\t"
  132. : : "m" (AA));
  133. __asm__ volatile(
  134. /* [mm2,mm3] += C * src[0..7] */
  135. "movq %0, %%mm0\n\t"
  136. "movq %%mm0, %%mm1\n\t"
  137. "punpcklbw %%mm7, %%mm0\n\t"
  138. "punpckhbw %%mm7, %%mm1\n\t"
  139. "pmullw %%mm6, %%mm0\n\t"
  140. "pmullw %%mm6, %%mm1\n\t"
  141. "paddw %%mm0, %%mm2\n\t"
  142. "paddw %%mm1, %%mm3\n\t"
  143. : : "m" (src[0]));
  144. __asm__ volatile(
  145. /* [mm2,mm3] += D * src[1..8] */
  146. "movq %1, %%mm1\n\t"
  147. "movq %%mm1, %%mm0\n\t"
  148. "movq %%mm1, %%mm4\n\t"
  149. "punpcklbw %%mm7, %%mm0\n\t"
  150. "punpckhbw %%mm7, %%mm4\n\t"
  151. "pmullw %2, %%mm0\n\t"
  152. "pmullw %2, %%mm4\n\t"
  153. "paddw %%mm0, %%mm2\n\t"
  154. "paddw %%mm4, %%mm3\n\t"
  155. "movq %0, %%mm0\n\t"
  156. : : "m" (src[0]), "m" (src[1]), "m" (DD));
  157. __asm__ volatile(
  158. /* dst[0..7] = ([mm2,mm3] + 32) >> 6 */
  159. "paddw %1, %%mm2\n\t"
  160. "paddw %1, %%mm3\n\t"
  161. "psrlw $6, %%mm2\n\t"
  162. "psrlw $6, %%mm3\n\t"
  163. "packuswb %%mm3, %%mm2\n\t"
  164. H264_CHROMA_OP(%0, %%mm2)
  165. "movq %%mm2, %0\n\t"
  166. : "=m" (dst[0]) : "m" (*rnd_reg));
  167. dst+= stride;
  168. }
  169. }

static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
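    /* mm2 = x, mm3 = y, mm4 = 8-x, mm5 = 8-y, broadcast as packed words.
     * The loop is software-pipelined to emit two output rows per iteration,
     * reusing each row's horizontal filter result for the next row's
     * vertical blend; h is assumed to be even (see "sub $2" below). */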
    __asm__ volatile(
        "pxor %%mm7, %%mm7          \n\t"
        "movd %5, %%mm2             \n\t"
        "movd %6, %%mm3             \n\t"
        "movq "MANGLE(ff_pw_8)", %%mm4\n\t"
        "movq "MANGLE(ff_pw_8)", %%mm5\n\t"
        "punpcklwd %%mm2, %%mm2     \n\t"
        "punpcklwd %%mm3, %%mm3     \n\t"
        "punpcklwd %%mm2, %%mm2     \n\t"
        "punpcklwd %%mm3, %%mm3     \n\t"
        "psubw %%mm2, %%mm4         \n\t" /* mm4 = 8-x */
        "psubw %%mm3, %%mm5         \n\t" /* mm5 = 8-y */

        /* prologue: horizontal filter of row 0 into mm6 */
        "movd (%1), %%mm0           \n\t"
        "movd 1(%1), %%mm6          \n\t"
        "add %3, %1                 \n\t"
        "punpcklbw %%mm7, %%mm0     \n\t"
        "punpcklbw %%mm7, %%mm6     \n\t"
        "pmullw %%mm4, %%mm0        \n\t"
        "pmullw %%mm2, %%mm6        \n\t"
        "paddw %%mm0, %%mm6         \n\t"

        "1:                         \n\t"
        /* horizontal filter of the next row into mm1 */
        "movd (%1), %%mm0           \n\t"
        "movd 1(%1), %%mm1          \n\t"
        "add %3, %1                 \n\t"
        "punpcklbw %%mm7, %%mm0     \n\t"
        "punpcklbw %%mm7, %%mm1     \n\t"
        "pmullw %%mm4, %%mm0        \n\t"
        "pmullw %%mm2, %%mm1        \n\t"
        "paddw %%mm0, %%mm1         \n\t"
        "movq %%mm1, %%mm0          \n\t"
        /* vertical blend with the previous row's result in mm6 */
        "pmullw %%mm5, %%mm6        \n\t"
        "pmullw %%mm3, %%mm1        \n\t"
        "paddw %4, %%mm6            \n\t"
        "paddw %%mm6, %%mm1         \n\t"
        "psrlw $6, %%mm1            \n\t"
        "packuswb %%mm1, %%mm1      \n\t"
        H264_CHROMA_OP4((%0), %%mm1, %%mm6)
        "movd %%mm1, (%0)           \n\t"
        "add %3, %0                 \n\t"

        /* second output row of this iteration, same pattern */
        "movd (%1), %%mm6           \n\t"
        "movd 1(%1), %%mm1          \n\t"
        "add %3, %1                 \n\t"
        "punpcklbw %%mm7, %%mm6     \n\t"
        "punpcklbw %%mm7, %%mm1     \n\t"
        "pmullw %%mm4, %%mm6        \n\t"
        "pmullw %%mm2, %%mm1        \n\t"
        "paddw %%mm6, %%mm1         \n\t"
        "movq %%mm1, %%mm6          \n\t"
        "pmullw %%mm5, %%mm0        \n\t"
        "pmullw %%mm3, %%mm1        \n\t"
        "paddw %4, %%mm0            \n\t"
        "paddw %%mm0, %%mm1         \n\t"
        "psrlw $6, %%mm1            \n\t"
        "packuswb %%mm1, %%mm1      \n\t"
        H264_CHROMA_OP4((%0), %%mm1, %%mm0)
        "movd %%mm1, (%0)           \n\t"
        "add %3, %0                 \n\t"
        "sub $2, %2                 \n\t"
        "jnz 1b                     \n\t"
        : "+r"(dst), "+r"(src), "+r"(h)
        : "r"((x86_reg)stride), "m"(ff_pw_32), "m"(x), "m"(y)
        );
}

#ifdef H264_CHROMA_MC2_TMPL
static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
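    /* Pack the four bilinear coefficients A = (8-x)(8-y), B = x(8-y),
     * C = (8-x)y, D = x*y two to a 32-bit integer:
     *   tmp = (x << 16) + (8 - x)
     *   CD  = tmp * y         = (x*y << 16)     + (8-x)*y      -> words {C, D}
     *   AB  = (tmp << 3) - CD = (x*(8-y) << 16) + (8-x)*(8-y)  -> words {A, B}
     * pshufw below is an MMXEXT instruction, hence the
     * H264_CHROMA_MC2_TMPL guard around this function. */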
    int tmp = ((1<<16)-1)*x + 8;
    int CD = tmp*y;
    int AB = (tmp<<3) - CD;

    __asm__ volatile(
        /* mm5 = {A,B,A,B} */
        /* mm6 = {C,D,C,D} */
        "movd %0, %%mm5\n\t"
        "movd %1, %%mm6\n\t"
        "punpckldq %%mm5, %%mm5\n\t"
        "punpckldq %%mm6, %%mm6\n\t"
        "pxor %%mm7, %%mm7\n\t"
        /* mm2 = src[0,1,1,2] */
        "movd %2, %%mm2\n\t"
        "punpcklbw %%mm7, %%mm2\n\t"
        "pshufw $0x94, %%mm2, %%mm2\n\t"
        :: "r"(AB), "r"(CD), "m"(src[0]));

    __asm__ volatile(
        "1:\n\t"
        "add %4, %1\n\t"
        /* mm1 = A * src[0,1] + B * src[1,2] */
        "movq %%mm2, %%mm1\n\t"
        "pmaddwd %%mm5, %%mm1\n\t"
        /* mm0 = src[0,1,1,2] */
        "movd (%1), %%mm0\n\t"
        "punpcklbw %%mm7, %%mm0\n\t"
        "pshufw $0x94, %%mm0, %%mm0\n\t"
        /* mm1 += C * src[0,1] + D * src[1,2] */
        "movq %%mm0, %%mm2\n\t"
        "pmaddwd %%mm6, %%mm0\n\t"
        "paddw %3, %%mm1\n\t"
        "paddw %%mm0, %%mm1\n\t"
        /* dst[0,1] = pack((mm1 + 32) >> 6) */
        "psrlw $6, %%mm1\n\t"
        "packssdw %%mm7, %%mm1\n\t"
        "packuswb %%mm7, %%mm1\n\t"
        H264_CHROMA_OP4((%0), %%mm1, %%mm3)
        "movd %%mm1, %%esi\n\t"
        "movw %%si, (%0)\n\t"
        "add %4, %0\n\t"
        "sub $1, %2\n\t"
        "jnz 1b\n\t"
        : "+r" (dst), "+r"(src), "+r"(h)
        : "m" (ff_pw_32), "r"((x86_reg)stride)
        : "%esi");
}
#endif