/*
 * This is optimized for SuperH (SH-4), which has post-increment addressing (*p++).
 * On some CPUs, indexed addressing (p[n]) may be faster than post-increment (*p++).
 *
 * copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
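
/*
 * Illustrative sketch of the note above (not from the original source):
 * the loops below read each row through running pointers,
 *     t0 = *s0++; t1 = *s0++;
 * rather than the equivalent indexed form
 *     t0 = s0[0]; t1 = s0[1];
 * since SH-4 load instructions can post-increment the address register.
 */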
#include "libavutil/common.h"
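
/*
 * These helpers assume the includer (dsputil_align.c in the sh4 port)
 * has already defined LP()/LPC() (aligned 32-bit store/load),
 * rnd_avg32()/no_rnd_avg32() (byte-wise rounded/truncated averages of two
 * packed pixels), and UNPACK()/rnd_PACK()/no_rnd_PACK(), which split four
 * packed pixels into high/low parts so the four-source *_l4 averages can
 * accumulate without byte overflow.
 */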
#define PIXOP2(OPNAME, OP) \
\
static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),no_rnd_avg32(AV_RN32(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        OP(LP(dst+8),no_rnd_avg32(AV_RN32(src1+8),LPC(src2+8)) ); \
        OP(LP(dst+12),no_rnd_avg32(AV_RN32(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),rnd_avg32(AV_RN32(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        OP(LP(dst+8),rnd_avg32(AV_RN32(src1+8),LPC(src2+8)) ); \
        OP(LP(dst+12),rnd_avg32(AV_RN32(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do { /* only src2 aligned */\
        OP(LP(dst  ),no_rnd_avg32(AV_RN32(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),rnd_avg32(AV_RN32(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),no_rnd_avg32(LPC(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),no_rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),rnd_avg32(LPC(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),no_rnd_avg32(LPC(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),no_rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        OP(LP(dst+8),no_rnd_avg32(LPC(src1+8),LPC(src2+8)) ); \
        OP(LP(dst+12),no_rnd_avg32(LPC(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),rnd_avg32(LPC(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        OP(LP(dst+8),rnd_avg32(LPC(src1+8),LPC(src2+8)) ); \
        OP(LP(dst+12),rnd_avg32(LPC(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; /* only src1 is unaligned */\
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { /* only src1 is unaligned */\
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
}
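
/* Instantiate the helpers twice: "put" stores the averaged result
 * directly, while "avg" additionally blends it with the bytes already
 * in dst. */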
#define op_avg(a, b) a = rnd_avg32(a,b)
#define op_put(a, b) a = b

PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put

#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
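
/*
 * One-pass global motion compensation with a single 1/16-pel vector:
 * A/B/C/D are the bilinear weights of the four neighbouring samples and
 * sum to 16*16 = 256, hence the >>8 after adding the rounder.
 */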
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
{
    const int A=(16-x16)*(16-y16);
    const int B=(   x16)*(16-y16);
    const int C=(16-x16)*(   y16);
    const int D=(   x16)*(   y16);

    do {
        int t0,t1,t2,t3;
        uint8_t *s0 = src;
        uint8_t *s1 = src+stride;
        t0 = *s0++; t2 = *s1++;
        t1 = *s0++; t3 = *s1++;
        dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        t1 = *s0++; t3 = *s1++;
        dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        t1 = *s0++; t3 = *s1++;
        dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        t1 = *s0++; t3 = *s1++;
        dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        dst+= stride;
        src+= stride;
    }while(--h);
}
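
/*
 * General global motion compensation: (vx,vy) steps through an affine
 * transform of the block coordinates, sampling with bilinear weights and
 * clipping out-of-picture coordinates via av_clip().
 */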
static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                  int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
    int y, vx, vy;
    const int s= 1<<shift;

    width--;
    height--;

    for(y=0; y<h; y++){
        int x;

        vx= ox;
        vy= oy;
        for(x=0; x<8; x++){ //XXX FIXME optimize
            int src_x, src_y, frac_x, frac_y, index;

            src_x= vx>>16;
            src_y= vy>>16;
            frac_x= src_x&(s-1);
            frac_y= src_y&(s-1);
            src_x>>=shift;
            src_y>>=shift;

            if((unsigned)src_x < width){
                if((unsigned)src_y < height){
                    index= src_x + src_y*stride;
                    dst[y*stride + x]= ( ( src[index         ]*(s-frac_x)
                                         + src[index       +1]* frac_x )*(s-frac_y)
                                       + ( src[index+stride  ]*(s-frac_x)
                                         + src[index+stride+1]* frac_x )* frac_y
                                       + r)>>(shift*2);
                }else{
                    index= src_x + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= ( ( src[index         ]*(s-frac_x)
                                         + src[index       +1]* frac_x )*s
                                       + r)>>(shift*2);
                }
            }else{
                if((unsigned)src_y < height){
                    index= av_clip(src_x, 0, width) + src_y*stride;
                    dst[y*stride + x]= ( ( src[index        ]*(s-frac_y)
                                         + src[index+stride ]* frac_y )*s
                                       + r)>>(shift*2);
                }else{
                    index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= src[index];
                }
            }

            vx+= dxx;
            vy+= dyx;
        }
        ox += dxy;
        oy += dyy;
    }
}
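
/*
 * H.264 chroma motion compensation: bilinear interpolation with
 * eighth-pel weights; A+B+C+D = 8*8 = 64, which is why op_put below
 * rounds with ((b) + 32) >> 6.
 */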
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}

#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)

H264_CHROMA_MC(put_, op_put)
H264_CHROMA_MC(avg_, op_avg)
#undef op_avg
#undef op_put
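
/*
 * MPEG-4 quarter-pel motion compensation.  The mcXY suffix encodes the
 * fractional position: X is the horizontal and Y the vertical quarter-pel
 * offset (mc00 = copy, mc20 = horizontal half-pel, mc22 = both, ...).
 * The row filter is the MPEG-4 8-tap (-1,3,-6,20,20,-6,3,-1) kernel, with
 * taps mirrored at the block edges.
 */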
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    do {\
        uint8_t *s = src; \
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        src0= *s++;\
        src1= *s++;\
        src2= *s++;\
        src3= *s++;\
        src4= *s++;\
        OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
        src5= *s++;\
        OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
        src6= *s++;\
        OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
        src7= *s++;\
        OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
        src8= *s++;\
        OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
        OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
        OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
        OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int w=8;\
    do{\
        uint8_t *s = src, *d=dst;\
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        src0 = *s; s+=srcStride; \
        src1 = *s; s+=srcStride; \
        src2 = *s; s+=srcStride; \
        src3 = *s; s+=srcStride; \
        src4 = *s; s+=srcStride; \
        OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
        src5 = *s; s+=srcStride; \
        OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
        src6 = *s; s+=srcStride; \
        OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
        src7 = *s; s+=srcStride; \
        OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
        src8 = *s; \
        OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
        OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
        OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
        OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
        dst++;\
        src++;\
    }while(--w);\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    do {\
        uint8_t *s = src;\
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        int src9,src10,src11,src12,src13,src14,src15,src16;\
        src0= *s++;\
        src1= *s++;\
        src2= *s++;\
        src3= *s++;\
        src4= *s++;\
        OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
        src5= *s++;\
        OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
        src6= *s++;\
        OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
        src7= *s++;\
        OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
        src8= *s++;\
        OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
        src9= *s++;\
        OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
        src10= *s++;\
        OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
        src11= *s++;\
        OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
        src12= *s++;\
        OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
        src13= *s++;\
        OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
        src14= *s++;\
        OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
        src15= *s++;\
        OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
        src16= *s++;\
        OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
        OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
        OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
        OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int w=16;\
    do {\
        uint8_t *s = src, *d=dst;\
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        int src9,src10,src11,src12,src13,src14,src15,src16;\
        src0 = *s; s+=srcStride; \
        src1 = *s; s+=srcStride; \
        src2 = *s; s+=srcStride; \
        src3 = *s; s+=srcStride; \
        src4 = *s; s+=srcStride; \
        OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
        src5 = *s; s+=srcStride; \
        OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
        src6 = *s; s+=srcStride; \
        OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
        src7 = *s; s+=srcStride; \
        OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
        src8 = *s; s+=srcStride; \
        OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
        src9 = *s; s+=srcStride; \
        OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
        src10 = *s; s+=srcStride; \
        OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
        src11 = *s; s+=srcStride; \
        OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
        src12 = *s; s+=srcStride; \
        OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
        src13 = *s; s+=srcStride; \
        OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
        src14 = *s; s+=srcStride; \
        OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
        src15 = *s; s+=srcStride; \
        OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
        src16 = *s; \
        OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
        OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
        OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
        OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
        dst++;\
        src++;\
    }while(--w);\
}\
\
static void OPNAME ## qpel8_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_c(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    copy_block9(full, src, 16, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
}\
\
static void OPNAME ## qpel8_mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel16_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_c(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    copy_block17(full, src, 24, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
}\
\
static void OPNAME ## qpel16_mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}
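
/*
 * The 8-tap coefficients sum to 2*(20-6+3-1) = 32, so each pass is
 * normalized with ((b)+16)>>5 ("+15" for the no-rounding variants) and
 * clipped through the cm table.
 */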
#define op_avg(a, b)        a = (((a)+cm[((b) + 16)>>5]+1)>>1)
#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
#define op_put(a, b)        a = cm[((b) + 16)>>5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]

QPEL_MC(0, put_       , _       , op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_       , _       , op_avg)
//QPEL_MC(1, avg_no_rnd , _       , op_avg)
#undef op_avg
#undef op_avg_no_rnd
#undef op_put
#undef op_put_no_rnd
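
/*
 * H.264 luma half-pel filtering: the 6-tap (1,-5,20,20,-5,1) kernel,
 * parametrized by block width and height.  The hv variant first filters
 * rows into a 16-bit temporary buffer and then filters that buffer
 * vertically, keeping full intermediate precision.
 */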
#define H264_LOWPASS(OPNAME, OP, OP2) \
static inline void OPNAME ## h264_qpel_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    do {\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2;\
        srcB = *s++;\
        srcA = *s++;\
        src0 = *s++;\
        src1 = *s++;\
        src2 = *s++;\
        src3 = *s++;\
        OP(dst[0], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        src4 = *s++;\
        OP(dst[1], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        src5 = *s++;\
        OP(dst[2], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        src6 = *s++;\
        OP(dst[3], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        if (w>4) { /* fully unrolled for the wider block sizes */ \
            int src7,src8,src9,src10; \
            src7 = *s++;\
            OP(dst[4], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
            src8 = *s++;\
            OP(dst[5], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
            src9 = *s++;\
            OP(dst[6], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
            src10 = *s++;\
            OP(dst[7], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
            if (w>8) { \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s++;\
                OP(dst[8] , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
                src12 = *s++;\
                OP(dst[9] , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
                src13 = *s++;\
                OP(dst[10], (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
                src14 = *s++;\
                OP(dst[11], (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
                src15 = *s++;\
                OP(dst[12], (src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
                src16 = *s++;\
                OP(dst[13], (src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
                src17 = *s++;\
                OP(dst[14], (src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
                src18 = *s++;\
                OP(dst[15], (src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
            } \
        } \
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
\
static inline void OPNAME ## h264_qpel_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    do{\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2*srcStride,*d=dst;\
        srcB = *s; s+=srcStride;\
        srcA = *s; s+=srcStride;\
        src0 = *s; s+=srcStride;\
        src1 = *s; s+=srcStride;\
        src2 = *s; s+=srcStride;\
        src3 = *s; s+=srcStride;\
        OP(*d, (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));d+=dstStride;\
        src4 = *s; s+=srcStride;\
        OP(*d, (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));d+=dstStride;\
        src5 = *s; s+=srcStride;\
        OP(*d, (src2+src3)*20 - (src1+src4)*5 + (src0+src5));d+=dstStride;\
        src6 = *s; s+=srcStride;\
        OP(*d, (src3+src4)*20 - (src2+src5)*5 + (src1+src6));d+=dstStride;\
        if (h>4) { \
            int src7,src8,src9,src10; \
            src7 = *s; s+=srcStride;\
            OP(*d, (src4+src5)*20 - (src3+src6)*5 + (src2+src7));d+=dstStride;\
            src8 = *s; s+=srcStride;\
            OP(*d, (src5+src6)*20 - (src4+src7)*5 + (src3+src8));d+=dstStride;\
            src9 = *s; s+=srcStride;\
            OP(*d, (src6+src7)*20 - (src5+src8)*5 + (src4+src9));d+=dstStride;\
            src10 = *s; s+=srcStride;\
            OP(*d, (src7+src8)*20 - (src6+src9)*5 + (src5+src10));d+=dstStride;\
            if (h>8) { \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s; s+=srcStride;\
                OP(*d , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));d+=dstStride;\
                src12 = *s; s+=srcStride;\
                OP(*d , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));d+=dstStride;\
                src13 = *s; s+=srcStride;\
                OP(*d, (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));d+=dstStride;\
                src14 = *s; s+=srcStride;\
                OP(*d, (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));d+=dstStride;\
                src15 = *s; s+=srcStride;\
                OP(*d, (src12+src13)*20 - (src11+src14)*5 + (src10+src15));d+=dstStride;\
                src16 = *s; s+=srcStride;\
                OP(*d, (src13+src14)*20 - (src12+src15)*5 + (src11+src16));d+=dstStride;\
                src17 = *s; s+=srcStride;\
                OP(*d, (src14+src15)*20 - (src13+src16)*5 + (src12+src17));d+=dstStride;\
                src18 = *s; s+=srcStride;\
                OP(*d, (src15+src16)*20 - (src14+src17)*5 + (src13+src18));d+=dstStride;\
            } \
        } \
        dst++;\
        src++;\
    }while(--w);\
}\
\
static inline void OPNAME ## h264_qpel_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride,int w,int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    src -= 2*srcStride;\
    i= h+5; \
    do {\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2;\
        srcB = *s++;\
        srcA = *s++;\
        src0 = *s++;\
        src1 = *s++;\
        src2 = *s++;\
        src3 = *s++;\
        tmp[0] = ((src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        src4 = *s++;\
        tmp[1] = ((src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        src5 = *s++;\
        tmp[2] = ((src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        src6 = *s++;\
        tmp[3] = ((src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        if (w>4) { /* fully unrolled for the wider block sizes */ \
            int src7,src8,src9,src10; \
            src7 = *s++;\
            tmp[4] = ((src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
            src8 = *s++;\
            tmp[5] = ((src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
            src9 = *s++;\
            tmp[6] = ((src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
            src10 = *s++;\
            tmp[7] = ((src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
            if (w>8) { \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s++;\
                tmp[8] = ((src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
                src12 = *s++;\
                tmp[9] = ((src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
                src13 = *s++;\
                tmp[10] = ((src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
                src14 = *s++;\
                tmp[11] = ((src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
                src15 = *s++;\
                tmp[12] = ((src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
                src16 = *s++;\
                tmp[13] = ((src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
                src17 = *s++;\
                tmp[14] = ((src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
                src18 = *s++;\
                tmp[15] = ((src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
            } \
        } \
        tmp+=tmpStride;\
        src+=srcStride;\
    }while(--i);\
    tmp -= tmpStride*(h+5-2);\
    i = w; \
    do {\
        int tmpB,tmpA,tmp0,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6;\
        int16_t *s = tmp-2*tmpStride; \
        uint8_t *d=dst;\
        tmpB = *s; s+=tmpStride;\
        tmpA = *s; s+=tmpStride;\
        tmp0 = *s; s+=tmpStride;\
        tmp1 = *s; s+=tmpStride;\
        tmp2 = *s; s+=tmpStride;\
        tmp3 = *s; s+=tmpStride;\
        OP2(*d, (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));d+=dstStride;\
        tmp4 = *s; s+=tmpStride;\
        OP2(*d, (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));d+=dstStride;\
        tmp5 = *s; s+=tmpStride;\
        OP2(*d, (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));d+=dstStride;\
        tmp6 = *s; s+=tmpStride;\
        OP2(*d, (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));d+=dstStride;\
        if (h>4) { \
            int tmp7,tmp8,tmp9,tmp10; \
            tmp7 = *s; s+=tmpStride;\
            OP2(*d, (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));d+=dstStride;\
            tmp8 = *s; s+=tmpStride;\
            OP2(*d, (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));d+=dstStride;\
            tmp9 = *s; s+=tmpStride;\
            OP2(*d, (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));d+=dstStride;\
            tmp10 = *s; s+=tmpStride;\
            OP2(*d, (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));d+=dstStride;\
            if (h>8) { \
                int tmp11,tmp12,tmp13,tmp14,tmp15,tmp16,tmp17,tmp18; \
                tmp11 = *s; s+=tmpStride;\
                OP2(*d , (tmp8 +tmp9 )*20 - (tmp7 +tmp10)*5 + (tmp6 +tmp11));d+=dstStride;\
                tmp12 = *s; s+=tmpStride;\
                OP2(*d , (tmp9 +tmp10)*20 - (tmp8 +tmp11)*5 + (tmp7 +tmp12));d+=dstStride;\
                tmp13 = *s; s+=tmpStride;\
                OP2(*d, (tmp10+tmp11)*20 - (tmp9 +tmp12)*5 + (tmp8 +tmp13));d+=dstStride;\
                tmp14 = *s; s+=tmpStride;\
                OP2(*d, (tmp11+tmp12)*20 - (tmp10+tmp13)*5 + (tmp9 +tmp14));d+=dstStride;\
                tmp15 = *s; s+=tmpStride;\
                OP2(*d, (tmp12+tmp13)*20 - (tmp11+tmp14)*5 + (tmp10+tmp15));d+=dstStride;\
                tmp16 = *s; s+=tmpStride;\
                OP2(*d, (tmp13+tmp14)*20 - (tmp12+tmp15)*5 + (tmp11+tmp16));d+=dstStride;\
                tmp17 = *s; s+=tmpStride;\
                OP2(*d, (tmp14+tmp15)*20 - (tmp13+tmp16)*5 + (tmp12+tmp17));d+=dstStride;\
                tmp18 = *s; s+=tmpStride;\
                OP2(*d, (tmp15+tmp16)*20 - (tmp14+tmp17)*5 + (tmp13+tmp18));d+=dstStride;\
            } \
        } \
        dst++;\
        tmp++;\
    }while(--i);\
}\
\
static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
\
static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
\
static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,16,16); \
}
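
/*
 * Expand the 16 quarter-pel entry points for one block size; the mcXY
 * naming follows the same convention as QPEL_MC above (X = horizontal,
 * Y = vertical quarter-pel phase).
 */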
#define H264_MC(OPNAME, SIZE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src+1, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t half[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t half[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfV[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfV[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}
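
/*
 * One 6-tap pass has gain 20+20-5-5+1+1 = 32, normalized by ((b)+16)>>5;
 * the separable hv path has gain 32*32 = 1024, hence OP2's ((b)+512)>>10.
 */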
#define op_avg(a, b)  a = (((a)+cm[((b) + 16)>>5]+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b)  a = cm[((b) + 16)>>5]
#define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
#define op2_put(a, b) a = cm[((b) + 512)>>10]

H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)

#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
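
/*
 * WMV2 (mspel) half-pel filters: a 4-tap (-1,9,9,-1) kernel whose
 * coefficients sum to 16, hence the (... + 8) >> 4 rounding into the
 * clip table.
 */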
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    do{
        int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
        uint8_t *s = src;
        src_1 = s[-1];
        src0 = *s++;
        src1 = *s++;
        src2 = *s++;
        dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
        src3 = *s++;
        dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
        src4 = *s++;
        dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
        src5 = *s++;
        dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
        src6 = *s++;
        dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
        src7 = *s++;
        dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
        src8 = *s++;
        dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
        src9 = *s++;
        dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
        dst+=dstStride;
        src+=srcStride;
    }while(--h);
}

static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    do{
        int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
        uint8_t *s = src,*d = dst;
        src_1 = *(s-srcStride);
        src0 = *s; s+=srcStride;
        src1 = *s; s+=srcStride;
        src2 = *s; s+=srcStride;
        *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
        src3 = *s; s+=srcStride;
        *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
        src4 = *s; s+=srcStride;
        *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
        src5 = *s; s+=srcStride;
        *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
        src6 = *s; s+=srcStride;
        *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
        src7 = *s; s+=srcStride;
        *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
        src8 = *s; s+=srcStride;
        *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
        src9 = *s;
        *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
        src++;
        dst++;
    }while(--w);
}

static void put_mspel8_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels8_c(dst, src, stride, 8);
}

static void put_mspel8_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t half[64];

    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
}

static void put_mspel8_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t half[64];

    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
}

static void put_mspel8_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];

    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
    put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];

    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
    put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];

    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
}