/*
 * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
 *                    Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * MMX optimized version of (put|avg)_h264_chroma_mc8.
 * H264_CHROMA_MC8_TMPL must be defined to the desired function name.
 * H264_CHROMA_OP must be defined to empty for put and to pavgb/pavgusb for avg.
 * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function.
 */
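
/* Illustrative instantiation (a sketch, not part of this file): the including
 * file defines the macros and then #includes this template once per variant,
 * along the lines of
 *
 *   #define H264_CHROMA_OP(S,D)
 *   #define H264_CHROMA_OP4(S,D,T)
 *   #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
 *   #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
 *   #define H264_CHROMA_MC8_MV0  put_pixels8_mmx
 *   #include "dsputil_h264_template_mmx.c"
 *
 * The function names above are illustrative. The avg variants instead define
 * H264_CHROMA_OP to emit a pavgb (or pavgusb on 3DNow!), and the MC2 template
 * is only instantiated where pshufw (MMX2) is available.
 */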

static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    DECLARE_ALIGNED_8(uint64_t, AA);
    DECLARE_ALIGNED_8(uint64_t, DD);
    int i;

    if(y==0 && x==0) {
        /* no filter needed */
        H264_CHROMA_MC8_MV0(dst, src, stride, h);
        return;
    }

    assert(x<8 && y<8 && x>=0 && y>=0);

    if(y==0 || x==0)
    {
        /* 1 dimensional filter only: dxy steps to the horizontal (1)
         * or vertical (stride) neighbour */
        const int dxy = x ? 1 : stride;
        asm volatile(
            "movd %0, %%mm5\n\t"
            "movq %1, %%mm4\n\t"
            "punpcklwd %%mm5, %%mm5\n\t"
            "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x+y (exactly one of them is nonzero here) */
            "movq %%mm4, %%mm6\n\t"
            "pxor %%mm7, %%mm7\n\t"
            "psubw %%mm5, %%mm4\n\t"     /* mm4 = A = 8-B */
            "psrlw $1, %%mm6\n\t"        /* mm6 = 4 (rounding term) */
            :: "rm"(x+y), "m"(ff_pw_8));

        for(i=0; i<h; i++) {
            asm volatile(
                /* mm0 = src[0..7], mm2 = src[dxy..dxy+7] */
                "movq %0, %%mm0\n\t"
                "movq %1, %%mm2\n\t"
                :: "m"(src[0]), "m"(src[dxy]));

            asm volatile(
                /* [mm0,mm1] = A * src[0..7] */
                /* [mm2,mm3] = B * src[dxy..dxy+7] */
                "movq %%mm0, %%mm1\n\t"
                "movq %%mm2, %%mm3\n\t"
                "punpcklbw %%mm7, %%mm0\n\t"
                "punpckhbw %%mm7, %%mm1\n\t"
                "punpcklbw %%mm7, %%mm2\n\t"
                "punpckhbw %%mm7, %%mm3\n\t"
                "pmullw %%mm4, %%mm0\n\t"
                "pmullw %%mm4, %%mm1\n\t"
                "pmullw %%mm5, %%mm2\n\t"
                "pmullw %%mm5, %%mm3\n\t"

                /* dst[0..7] = (A * src[0..7] + B * src[dxy..dxy+7] + 4) >> 3 */
                "paddw %%mm6, %%mm0\n\t"
                "paddw %%mm6, %%mm1\n\t"
                "paddw %%mm2, %%mm0\n\t"
                "paddw %%mm3, %%mm1\n\t"
                "psrlw $3, %%mm0\n\t"
                "psrlw $3, %%mm1\n\t"
                "packuswb %%mm1, %%mm0\n\t"
                H264_CHROMA_OP(%0, %%mm0)
                "movq %%mm0, %0\n\t"
                : "=m" (dst[0]));

            src += stride;
            dst += stride;
        }
        return;
    }

    /* general case, bilinear */
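    /* Bilinear weights (this is the standard H.264 chroma interpolation;
     * the expansions match the register comments below): for 0 <= x,y < 8
     *   A = (8-x)*(8-y) = xy - 8x - 8y + 64
     *   B =    x *(8-y) = 8x - xy
     *   C = (8-x)*  y   = 8y - xy
     *   D =    x *  y
     * dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1] + 32) >> 6
     */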
    asm volatile("movd %2, %%mm4\n\t"
                 "movd %3, %%mm6\n\t"
                 "punpcklwd %%mm4, %%mm4\n\t"
                 "punpcklwd %%mm6, %%mm6\n\t"
                 "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
                 "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
                 "movq %%mm4, %%mm5\n\t"
                 "pmullw %%mm6, %%mm4\n\t"    /* mm4 = x * y */
                 "psllw $3, %%mm5\n\t"
                 "psllw $3, %%mm6\n\t"
                 "movq %%mm5, %%mm7\n\t"
                 "paddw %%mm6, %%mm7\n\t"
                 "movq %%mm4, %1\n\t"         /* DD = x * y */
                 "psubw %%mm4, %%mm5\n\t"     /* mm5 = B = 8x - xy */
                 "psubw %%mm4, %%mm6\n\t"     /* mm6 = C = 8y - xy */
                 "paddw %4, %%mm4\n\t"
                 "psubw %%mm7, %%mm4\n\t"     /* mm4 = A = xy - (8x+8y) + 64 */
                 "pxor %%mm7, %%mm7\n\t"
                 "movq %%mm4, %0\n\t"
                 : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));

    asm volatile(
        /* mm0 = src[0..7], mm1 = src[1..8] */
        "movq %0, %%mm0\n\t"
        "movq %1, %%mm1\n\t"
        : : "m" (src[0]), "m" (src[1]));

    for(i=0; i<h; i++) {
        src += stride;

        asm volatile(
            /* mm2 = A * src[0..3] + B * src[1..4] */
            /* mm3 = A * src[4..7] + B * src[5..8] */
            "movq %%mm0, %%mm2\n\t"
            "movq %%mm1, %%mm3\n\t"
            "punpckhbw %%mm7, %%mm0\n\t"
            "punpcklbw %%mm7, %%mm1\n\t"
            "punpcklbw %%mm7, %%mm2\n\t"
            "punpckhbw %%mm7, %%mm3\n\t"
            "pmullw %0, %%mm0\n\t"
            "pmullw %0, %%mm2\n\t"
            "pmullw %%mm5, %%mm1\n\t"
            "pmullw %%mm5, %%mm3\n\t"
            "paddw %%mm1, %%mm2\n\t"
            "paddw %%mm0, %%mm3\n\t"
            : : "m" (AA));

        asm volatile(
            /* [mm2,mm3] += C * src[0..7] */
            "movq %0, %%mm0\n\t"
            "movq %%mm0, %%mm1\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "punpckhbw %%mm7, %%mm1\n\t"
            "pmullw %%mm6, %%mm0\n\t"
            "pmullw %%mm6, %%mm1\n\t"
            "paddw %%mm0, %%mm2\n\t"
            "paddw %%mm1, %%mm3\n\t"
            : : "m" (src[0]));

        asm volatile(
            /* [mm2,mm3] += D * src[1..8] */
            "movq %1, %%mm1\n\t"
            "movq %%mm1, %%mm0\n\t"
            "movq %%mm1, %%mm4\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "punpckhbw %%mm7, %%mm4\n\t"
            "pmullw %2, %%mm0\n\t"
            "pmullw %2, %%mm4\n\t"
            "paddw %%mm0, %%mm2\n\t"
            "paddw %%mm4, %%mm3\n\t"
            "movq %0, %%mm0\n\t" /* reload mm0 = src[0..7] for the next iteration
                                  * (mm1 already holds src[1..8]) */
            : : "m" (src[0]), "m" (src[1]), "m" (DD));

        asm volatile(
            /* dst[0..7] = ([mm2,mm3] + 32) >> 6 */
            "paddw %1, %%mm2\n\t"
            "paddw %1, %%mm3\n\t"
            "psrlw $6, %%mm2\n\t"
            "psrlw $6, %%mm3\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            H264_CHROMA_OP(%0, %%mm2)
            "movq %%mm2, %0\n\t"
            : "=m" (dst[0]) : "m" (ff_pw_32));

        dst += stride;
    }
}

static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    DECLARE_ALIGNED_8(uint64_t, AA);
    DECLARE_ALIGNED_8(uint64_t, DD);
    int i;

    /* no special case for mv=(0,0) in 4x*, since it's much less common than in 8x*.
     * could still save a few cycles, but maybe not worth the complexity. */

    assert(x<8 && y<8 && x>=0 && y>=0);

    asm volatile("movd %2, %%mm4\n\t"
                 "movd %3, %%mm6\n\t"
                 "punpcklwd %%mm4, %%mm4\n\t"
                 "punpcklwd %%mm6, %%mm6\n\t"
                 "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
                 "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
                 "movq %%mm4, %%mm5\n\t"
                 "pmullw %%mm6, %%mm4\n\t"    /* mm4 = x * y */
                 "psllw $3, %%mm5\n\t"
                 "psllw $3, %%mm6\n\t"
                 "movq %%mm5, %%mm7\n\t"
                 "paddw %%mm6, %%mm7\n\t"
                 "movq %%mm4, %1\n\t"         /* DD = x * y */
                 "psubw %%mm4, %%mm5\n\t"     /* mm5 = B = 8x - xy */
                 "psubw %%mm4, %%mm6\n\t"     /* mm6 = C = 8y - xy */
                 "paddw %4, %%mm4\n\t"
                 "psubw %%mm7, %%mm4\n\t"     /* mm4 = A = xy - (8x+8y) + 64 */
                 "pxor %%mm7, %%mm7\n\t"
                 "movq %%mm4, %0\n\t"
                 : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));

    asm volatile(
        /* mm0 = src[0..3], mm1 = src[1..4] */
        "movd %0, %%mm0\n\t"
        "movd %1, %%mm1\n\t"
        "punpcklbw %%mm7, %%mm0\n\t"
        "punpcklbw %%mm7, %%mm1\n\t"
        : : "m" (src[0]), "m" (src[1]));

    for(i=0; i<h; i++) {
        asm volatile(
            /* mm2 = A * src[0..3] + B * src[1..4] */
            "movq %%mm0, %%mm2\n\t"
            "pmullw %0, %%mm2\n\t"
            "pmullw %%mm5, %%mm1\n\t"
            "paddw %%mm1, %%mm2\n\t"
            : : "m" (AA));

        src += stride;
        asm volatile(
            /* mm0 = src[0..3], mm1 = src[1..4] */
            "movd %0, %%mm0\n\t"
            "movd %1, %%mm1\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "punpcklbw %%mm7, %%mm1\n\t"
            : : "m" (src[0]), "m" (src[1]));

        asm volatile(
            /* mm2 += C * src[0..3] + D * src[1..4] */
            "movq %%mm0, %%mm3\n\t"
            "movq %%mm1, %%mm4\n\t"
            "pmullw %%mm6, %%mm3\n\t"
            "pmullw %0, %%mm4\n\t"
            "paddw %%mm3, %%mm2\n\t"
            "paddw %%mm4, %%mm2\n\t"
            : : "m" (DD));

        asm volatile(
            /* dst[0..3] = pack((mm2 + 32) >> 6) */
            "paddw %1, %%mm2\n\t"
            "psrlw $6, %%mm2\n\t"
            "packuswb %%mm7, %%mm2\n\t"
            H264_CHROMA_OP4(%0, %%mm2, %%mm3)
            "movd %%mm2, %0\n\t"
            : "=m" (dst[0]) : "m" (ff_pw_32));

        dst += stride;
    }
}

#ifdef H264_CHROMA_MC2_TMPL
static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
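    /* The four bilinear weights (same A,B,C,D as in the 8x8 case) are packed
     * into two dwords so that one pmaddwd computes two output pixels at once:
     * AB = (B<<16) | A and CD = (D<<16) | C. For example,
     * ((1<<16)-1)*x*y + 8*y == (x*y << 16) + (8-x)*y. */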
    int CD=((1<<16)-1)*x*y + 8*y;
    int AB=((8<<16)-8)*x + 64 - CD;
    int i;

    asm volatile(
        /* mm5 = {A,B,A,B} */
        /* mm6 = {C,D,C,D} */
        "movd %0, %%mm5\n\t"
        "movd %1, %%mm6\n\t"
        "punpckldq %%mm5, %%mm5\n\t"
        "punpckldq %%mm6, %%mm6\n\t"
        "pxor %%mm7, %%mm7\n\t"
        :: "r"(AB), "r"(CD));

    asm volatile(
        /* mm0 = src[0,1,1,2] */
        "movd %0, %%mm0\n\t"
        "punpcklbw %%mm7, %%mm0\n\t"
        "pshufw $0x94, %%mm0, %%mm0\n\t"
        :: "m"(src[0]));

    for(i=0; i<h; i++) {
        asm volatile(
            /* mm1 = A * src[0,1] + B * src[1,2] */
            "movq %%mm0, %%mm1\n\t"
            "pmaddwd %%mm5, %%mm1\n\t"
            ::);

        src += stride;
        asm volatile(
            /* mm0 = src[0,1,1,2] */
            "movd %0, %%mm0\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "pshufw $0x94, %%mm0, %%mm0\n\t"
            :: "m"(src[0]));

        asm volatile(
            /* mm1 += C * src[0,1] + D * src[1,2] */
            "movq %%mm0, %%mm2\n\t"
            "pmaddwd %%mm6, %%mm2\n\t"
            "paddw %%mm2, %%mm1\n\t"
            ::);

        asm volatile(
            /* dst[0,1] = pack((mm1 + 32) >> 6) */
            "paddw %1, %%mm1\n\t"
            "psrlw $6, %%mm1\n\t"
            "packssdw %%mm7, %%mm1\n\t"
            "packuswb %%mm7, %%mm1\n\t"
            /* writes garbage to the right of dst.
             * ok because partitions are processed from left to right. */
            H264_CHROMA_OP4(%0, %%mm1, %%mm3)
            "movd %%mm1, %0\n\t"
            : "=m" (dst[0]) : "m" (ff_pw_32));

        dst += stride;
    }
}
#endif