You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

863 lines
38KB

  1. /*
  2. * This is optimized for SH, which has post-increment addressing (*p++).
  3. * On some CPUs indexed access (p[n]) may be faster than post-increment (*p++).
  4. *
  5. * copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
  6. *
  7. * This file is part of Libav.
  8. *
  9. * Libav is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * Libav is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with Libav; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. #include "libavutil/common.h"
  24. #include "libavcodec/copy_block.h"
  25. #include "libavcodec/rnd_avg.h"
  26. #define PIXOP2(OPNAME, OP) \
  27. \
  28. static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  29. {\
  30. do {\
  31. OP(LP(dst ),rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
  32. src1+=src_stride1; \
  33. src2+=src_stride2; \
  34. dst+=dst_stride; \
  35. } while(--h); \
  36. }\
  37. \
  38. static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  39. {\
  40. do {\
  41. OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
  42. src1+=src_stride1; \
  43. src2+=src_stride2; \
  44. dst+=dst_stride; \
  45. } while(--h); \
  46. }\
  47. \
  48. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  49. {\
  50. do {\
  51. OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
  52. OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
  53. OP(LP(dst+8),no_rnd_avg32(AV_RN32(src1+8),LPC(src2+8)) ); \
  54. OP(LP(dst+12),no_rnd_avg32(AV_RN32(src1+12),LPC(src2+12)) ); \
  55. src1+=src_stride1; \
  56. src2+=src_stride2; \
  57. dst+=dst_stride; \
  58. } while(--h); \
  59. }\
  60. \
  61. static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  62. {\
  63. do {\
  64. OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
  65. OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
  66. OP(LP(dst+8),rnd_avg32(AV_RN32(src1+8),LPC(src2+8)) ); \
  67. OP(LP(dst+12),rnd_avg32(AV_RN32(src1+12),LPC(src2+12)) ); \
  68. src1+=src_stride1; \
  69. src2+=src_stride2; \
  70. dst+=dst_stride; \
  71. } while(--h); \
  72. }\
  73. \
  74. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  75. {\
  76. do { /* onlye src2 aligned */\
  77. OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
  78. OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
  79. src1+=src_stride1; \
  80. src2+=src_stride2; \
  81. dst+=dst_stride; \
  82. } while(--h); \
  83. }\
  84. \
  85. static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  86. {\
  87. do {\
  88. OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
  89. OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
  90. src1+=src_stride1; \
  91. src2+=src_stride2; \
  92. dst+=dst_stride; \
  93. } while(--h); \
  94. }\
  95. \
  96. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  97. {\
  98. do {\
  99. OP(LP(dst ),no_rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
  100. OP(LP(dst+4),no_rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
  101. src1+=src_stride1; \
  102. src2+=src_stride2; \
  103. dst+=dst_stride; \
  104. } while(--h); \
  105. }\
  106. \
  107. static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  108. {\
  109. do {\
  110. OP(LP(dst ),rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
  111. OP(LP(dst+4),rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
  112. src1+=src_stride1; \
  113. src2+=src_stride2; \
  114. dst+=dst_stride; \
  115. } while(--h); \
  116. }\
  117. \
  118. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  119. {\
  120. do {\
  121. OP(LP(dst ),no_rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
  122. OP(LP(dst+4),no_rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
  123. OP(LP(dst+8),no_rnd_avg32(LPC(src1+8),LPC(src2+8)) ); \
  124. OP(LP(dst+12),no_rnd_avg32(LPC(src1+12),LPC(src2+12)) ); \
  125. src1+=src_stride1; \
  126. src2+=src_stride2; \
  127. dst+=dst_stride; \
  128. } while(--h); \
  129. }\
  130. \
  131. static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  132. {\
  133. do {\
  134. OP(LP(dst ),rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
  135. OP(LP(dst+4),rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
  136. OP(LP(dst+8),rnd_avg32(LPC(src1+8),LPC(src2+8)) ); \
  137. OP(LP(dst+12),rnd_avg32(LPC(src1+12),LPC(src2+12)) ); \
  138. src1+=src_stride1; \
  139. src2+=src_stride2; \
  140. dst+=dst_stride; \
  141. } while(--h); \
  142. }\
  143. \
  144. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  145. { OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  146. \
  147. static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  148. { OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  149. \
  150. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  151. { OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  152. \
  153. static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  154. { OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  155. \
  156. static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  157. do { \
  158. uint32_t a0,a1,a2,a3; \
  159. UNPACK(a0,a1,LPC(src1),LPC(src2)); \
  160. UNPACK(a2,a3,LPC(src3),LPC(src4)); \
  161. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  162. UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
  163. UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
  164. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  165. src1+=src_stride1;\
  166. src2+=src_stride2;\
  167. src3+=src_stride3;\
  168. src4+=src_stride4;\
  169. dst+=dst_stride;\
  170. } while(--h); \
  171. } \
  172. \
  173. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  174. do { \
  175. uint32_t a0,a1,a2,a3; \
  176. UNPACK(a0,a1,LPC(src1),LPC(src2)); \
  177. UNPACK(a2,a3,LPC(src3),LPC(src4)); \
  178. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  179. UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
  180. UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
  181. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  182. src1+=src_stride1;\
  183. src2+=src_stride2;\
  184. src3+=src_stride3;\
  185. src4+=src_stride4;\
  186. dst+=dst_stride;\
  187. } while(--h); \
  188. } \
  189. \
  190. static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  191. do { \
  192. uint32_t a0,a1,a2,a3; /* src1 only not aligned */\
  193. UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
  194. UNPACK(a2,a3,LPC(src3),LPC(src4)); \
  195. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  196. UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
  197. UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
  198. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  199. src1+=src_stride1;\
  200. src2+=src_stride2;\
  201. src3+=src_stride3;\
  202. src4+=src_stride4;\
  203. dst+=dst_stride;\
  204. } while(--h); \
  205. } \
  206. \
  207. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  208. do { \
  209. uint32_t a0,a1,a2,a3; \
  210. UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
  211. UNPACK(a2,a3,LPC(src3),LPC(src4)); \
  212. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  213. UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
  214. UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
  215. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  216. src1+=src_stride1;\
  217. src2+=src_stride2;\
  218. src3+=src_stride3;\
  219. src4+=src_stride4;\
  220. dst+=dst_stride;\
  221. } while(--h); \
  222. } \
  223. \
  224. static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  225. do { \
  226. uint32_t a0,a1,a2,a3; \
  227. UNPACK(a0,a1,LPC(src1),LPC(src2)); \
  228. UNPACK(a2,a3,LPC(src3),LPC(src4)); \
  229. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  230. UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
  231. UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
  232. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  233. UNPACK(a0,a1,LPC(src1+8),LPC(src2+8)); \
  234. UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
  235. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  236. UNPACK(a0,a1,LPC(src1+12),LPC(src2+12)); \
  237. UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
  238. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  239. src1+=src_stride1;\
  240. src2+=src_stride2;\
  241. src3+=src_stride3;\
  242. src4+=src_stride4;\
  243. dst+=dst_stride;\
  244. } while(--h); \
  245. } \
  246. \
  247. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  248. do { \
  249. uint32_t a0,a1,a2,a3; \
  250. UNPACK(a0,a1,LPC(src1),LPC(src2)); \
  251. UNPACK(a2,a3,LPC(src3),LPC(src4)); \
  252. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  253. UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
  254. UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
  255. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  256. UNPACK(a0,a1,LPC(src1+8),LPC(src2+8)); \
  257. UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
  258. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  259. UNPACK(a0,a1,LPC(src1+12),LPC(src2+12)); \
  260. UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
  261. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  262. src1+=src_stride1;\
  263. src2+=src_stride2;\
  264. src3+=src_stride3;\
  265. src4+=src_stride4;\
  266. dst+=dst_stride;\
  267. } while(--h); \
  268. } \
  269. \
  270. static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  271. do { /* src1 is unaligned */\
  272. uint32_t a0,a1,a2,a3; \
  273. UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
  274. UNPACK(a2,a3,LPC(src3),LPC(src4)); \
  275. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  276. UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
  277. UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
  278. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  279. UNPACK(a0,a1,AV_RN32(src1+8),LPC(src2+8)); \
  280. UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
  281. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  282. UNPACK(a0,a1,AV_RN32(src1+12),LPC(src2+12)); \
  283. UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
  284. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  285. src1+=src_stride1;\
  286. src2+=src_stride2;\
  287. src3+=src_stride3;\
  288. src4+=src_stride4;\
  289. dst+=dst_stride;\
  290. } while(--h); \
  291. } \
  292. \
  293. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  294. do { \
  295. uint32_t a0,a1,a2,a3; \
  296. UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
  297. UNPACK(a2,a3,LPC(src3),LPC(src4)); \
  298. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  299. UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
  300. UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
  301. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  302. UNPACK(a0,a1,AV_RN32(src1+8),LPC(src2+8)); \
  303. UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
  304. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  305. UNPACK(a0,a1,AV_RN32(src1+12),LPC(src2+12)); \
  306. UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
  307. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  308. src1+=src_stride1;\
  309. src2+=src_stride2;\
  310. src3+=src_stride3;\
  311. src4+=src_stride4;\
  312. dst+=dst_stride;\
  313. } while(--h); \
  314. } \
  315. \
  316. #define op_avg(a, b) a = rnd_avg32(a,b)
  317. #define op_put(a, b) a = b
  318. PIXOP2(avg, op_avg)
  319. PIXOP2(put, op_put)
  320. #undef op_avg
  321. #undef op_put
  322. #define avg2(a,b) ((a+b+1)>>1)
  323. #define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
  324. static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
  325. {
  326. const int A=(16-x16)*(16-y16);
  327. const int B=( x16)*(16-y16);
  328. const int C=(16-x16)*( y16);
  329. const int D=( x16)*( y16);
  330. do {
  331. int t0,t1,t2,t3;
  332. uint8_t *s0 = src;
  333. uint8_t *s1 = src+stride;
  334. t0 = *s0++; t2 = *s1++;
  335. t1 = *s0++; t3 = *s1++;
  336. dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  337. t0 = *s0++; t2 = *s1++;
  338. dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  339. t1 = *s0++; t3 = *s1++;
  340. dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  341. t0 = *s0++; t2 = *s1++;
  342. dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  343. t1 = *s0++; t3 = *s1++;
  344. dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  345. t0 = *s0++; t2 = *s1++;
  346. dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  347. t1 = *s0++; t3 = *s1++;
  348. dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  349. t0 = *s0++; t2 = *s1++;
  350. dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  351. dst+= stride;
  352. src+= stride;
  353. }while(--h);
  354. }
  355. #define QPEL_MC(r, OPNAME, RND, OP) \
  356. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  357. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
  358. do {\
  359. uint8_t *s = src; \
  360. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  361. src0= *s++;\
  362. src1= *s++;\
  363. src2= *s++;\
  364. src3= *s++;\
  365. src4= *s++;\
  366. OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
  367. src5= *s++;\
  368. OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
  369. src6= *s++;\
  370. OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
  371. src7= *s++;\
  372. OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
  373. src8= *s++;\
  374. OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
  375. OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
  376. OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
  377. OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  378. dst+=dstStride;\
  379. src+=srcStride;\
  380. }while(--h);\
  381. }\
  382. \
  383. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  384. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
  385. int w=8;\
  386. do{\
  387. uint8_t *s = src, *d=dst;\
  388. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  389. src0 = *s; s+=srcStride; \
  390. src1 = *s; s+=srcStride; \
  391. src2 = *s; s+=srcStride; \
  392. src3 = *s; s+=srcStride; \
  393. src4 = *s; s+=srcStride; \
  394. OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
  395. src5 = *s; s+=srcStride; \
  396. OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
  397. src6 = *s; s+=srcStride; \
  398. OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
  399. src7 = *s; s+=srcStride; \
  400. OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
  401. src8 = *s; \
  402. OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
  403. OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
  404. OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
  405. OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  406. dst++;\
  407. src++;\
  408. }while(--w);\
  409. }\
  410. \
  411. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  412. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
  413. do {\
  414. uint8_t *s = src;\
  415. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  416. int src9,src10,src11,src12,src13,src14,src15,src16;\
  417. src0= *s++;\
  418. src1= *s++;\
  419. src2= *s++;\
  420. src3= *s++;\
  421. src4= *s++;\
  422. OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
  423. src5= *s++;\
  424. OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
  425. src6= *s++;\
  426. OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
  427. src7= *s++;\
  428. OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
  429. src8= *s++;\
  430. OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
  431. src9= *s++;\
  432. OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
  433. src10= *s++;\
  434. OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
  435. src11= *s++;\
  436. OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
  437. src12= *s++;\
  438. OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
  439. src13= *s++;\
  440. OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
  441. src14= *s++;\
  442. OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
  443. src15= *s++;\
  444. OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
  445. src16= *s++;\
  446. OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
  447. OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
  448. OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
  449. OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  450. dst+=dstStride;\
  451. src+=srcStride;\
  452. }while(--h);\
  453. }\
  454. \
  455. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  456. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
  457. int w=16;\
  458. do {\
  459. uint8_t *s = src, *d=dst;\
  460. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  461. int src9,src10,src11,src12,src13,src14,src15,src16;\
  462. src0 = *s; s+=srcStride; \
  463. src1 = *s; s+=srcStride; \
  464. src2 = *s; s+=srcStride; \
  465. src3 = *s; s+=srcStride; \
  466. src4 = *s; s+=srcStride; \
  467. OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
  468. src5 = *s; s+=srcStride; \
  469. OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
  470. src6 = *s; s+=srcStride; \
  471. OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
  472. src7 = *s; s+=srcStride; \
  473. OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
  474. src8 = *s; s+=srcStride; \
  475. OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
  476. src9 = *s; s+=srcStride; \
  477. OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
  478. src10 = *s; s+=srcStride; \
  479. OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
  480. src11 = *s; s+=srcStride; \
  481. OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
  482. src12 = *s; s+=srcStride; \
  483. OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
  484. src13 = *s; s+=srcStride; \
  485. OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
  486. src14 = *s; s+=srcStride; \
  487. OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
  488. src15 = *s; s+=srcStride; \
  489. OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
  490. src16 = *s; \
  491. OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
  492. OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
  493. OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
  494. OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  495. dst++;\
  496. src++;\
  497. }while(--w);\
  498. }\
  499. \
  500. static void OPNAME ## qpel8_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
  501. OPNAME ## pixels8_c(dst, src, stride, 8);\
  502. }\
  503. \
  504. static void OPNAME ## qpel8_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
  505. uint8_t half[64];\
  506. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
  507. OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
  508. }\
  509. \
  510. static void OPNAME ## qpel8_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
  511. OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
  512. }\
  513. \
  514. static void OPNAME ## qpel8_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
  515. uint8_t half[64];\
  516. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
  517. OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
  518. }\
  519. \
  520. static void OPNAME ## qpel8_mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
  521. uint8_t full[16*9];\
  522. uint8_t half[64];\
  523. copy_block9(full, src, 16, stride, 9);\
  524. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
  525. OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
  526. }\
  527. \
  528. static void OPNAME ## qpel8_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
  529. uint8_t full[16*9];\
  530. copy_block9(full, src, 16, stride, 9);\
  531. OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
  532. }\
  533. \
  534. static void OPNAME ## qpel8_mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
  535. uint8_t full[16*9];\
  536. uint8_t half[64];\
  537. copy_block9(full, src, 16, stride, 9);\
  538. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
  539. OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
  540. }\
  541. static void OPNAME ## qpel8_mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
  542. uint8_t full[16*9];\
  543. uint8_t halfH[72];\
  544. uint8_t halfHV[64];\
  545. copy_block9(full, src, 16, stride, 9);\
  546. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  547. put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
  548. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  549. OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
  550. }\
  551. static void OPNAME ## qpel8_mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
  552. uint8_t full[16*9];\
  553. uint8_t halfH[72];\
  554. uint8_t halfHV[64];\
  555. copy_block9(full, src, 16, stride, 9);\
  556. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  557. put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
  558. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  559. OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
  560. }\
  561. static void OPNAME ## qpel8_mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
  562. uint8_t full[16*9];\
  563. uint8_t halfH[72];\
  564. uint8_t halfHV[64];\
  565. copy_block9(full, src, 16, stride, 9);\
  566. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  567. put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
  568. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  569. OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
  570. }\
  571. static void OPNAME ## qpel8_mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
  572. uint8_t full[16*9];\
  573. uint8_t halfH[72];\
  574. uint8_t halfHV[64];\
  575. copy_block9(full, src, 16, stride, 9);\
  576. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  577. put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
  578. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  579. OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
  580. }\
  581. static void OPNAME ## qpel8_mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
  582. uint8_t halfH[72];\
  583. uint8_t halfHV[64];\
  584. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
  585. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  586. OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
  587. }\
  588. static void OPNAME ## qpel8_mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
  589. uint8_t halfH[72];\
  590. uint8_t halfHV[64];\
  591. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
  592. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  593. OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
  594. }\
  595. static void OPNAME ## qpel8_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
  596. uint8_t full[16*9];\
  597. uint8_t halfH[72];\
  598. copy_block9(full, src, 16, stride, 9);\
  599. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  600. put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
  601. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
  602. }\
  603. static void OPNAME ## qpel8_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
  604. uint8_t full[16*9];\
  605. uint8_t halfH[72];\
  606. copy_block9(full, src, 16, stride, 9);\
  607. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  608. put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
  609. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
  610. }\
  611. static void OPNAME ## qpel8_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
  612. uint8_t halfH[72];\
  613. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
  614. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
  615. }\
  616. static void OPNAME ## qpel16_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
  617. OPNAME ## pixels16_c(dst, src, stride, 16);\
  618. }\
  619. \
  620. static void OPNAME ## qpel16_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
  621. uint8_t half[256];\
  622. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
  623. OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
  624. }\
  625. \
  626. static void OPNAME ## qpel16_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
  627. OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
  628. }\
  629. \
  630. static void OPNAME ## qpel16_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
  631. uint8_t half[256];\
  632. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
  633. OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
  634. }\
  635. \
  636. static void OPNAME ## qpel16_mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
  637. uint8_t full[24*17];\
  638. uint8_t half[256];\
  639. copy_block17(full, src, 24, stride, 17);\
  640. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
  641. OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
  642. }\
  643. \
  644. static void OPNAME ## qpel16_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
  645. uint8_t full[24*17];\
  646. copy_block17(full, src, 24, stride, 17);\
  647. OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
  648. }\
  649. \
  650. static void OPNAME ## qpel16_mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
  651. uint8_t full[24*17];\
  652. uint8_t half[256];\
  653. copy_block17(full, src, 24, stride, 17);\
  654. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
  655. OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
  656. }\
  657. static void OPNAME ## qpel16_mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
  658. uint8_t full[24*17];\
  659. uint8_t halfH[272];\
  660. uint8_t halfHV[256];\
  661. copy_block17(full, src, 24, stride, 17);\
  662. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  663. put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
  664. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  665. OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
  666. }\
  667. static void OPNAME ## qpel16_mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
  668. uint8_t full[24*17];\
  669. uint8_t halfH[272];\
  670. uint8_t halfHV[256];\
  671. copy_block17(full, src, 24, stride, 17);\
  672. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  673. put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
  674. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  675. OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
  676. }\
  677. static void OPNAME ## qpel16_mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
  678. uint8_t full[24*17];\
  679. uint8_t halfH[272];\
  680. uint8_t halfHV[256];\
  681. copy_block17(full, src, 24, stride, 17);\
  682. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  683. put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
  684. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  685. OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
  686. }\
  687. static void OPNAME ## qpel16_mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
  688. uint8_t full[24*17];\
  689. uint8_t halfH[272];\
  690. uint8_t halfHV[256];\
  691. copy_block17(full, src, 24, stride, 17);\
  692. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  693. put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
  694. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  695. OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
  696. }\
  697. static void OPNAME ## qpel16_mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
  698. uint8_t halfH[272];\
  699. uint8_t halfHV[256];\
  700. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
  701. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  702. OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
  703. }\
  704. static void OPNAME ## qpel16_mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
  705. uint8_t halfH[272];\
  706. uint8_t halfHV[256];\
  707. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
  708. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  709. OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
  710. }\
  711. static void OPNAME ## qpel16_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
  712. uint8_t full[24*17];\
  713. uint8_t halfH[272];\
  714. copy_block17(full, src, 24, stride, 17);\
  715. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  716. put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
  717. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
  718. }\
  719. static void OPNAME ## qpel16_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
  720. uint8_t full[24*17];\
  721. uint8_t halfH[272];\
  722. copy_block17(full, src, 24, stride, 17);\
  723. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  724. put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
  725. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
  726. }\
  727. static void OPNAME ## qpel16_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
  728. uint8_t halfH[272];\
  729. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
  730. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
  731. }
/* Pixel-write ops plugged into the QPEL_MC template. (b) is a filter sum in
 * 16.5 fixed point; it is rounded (+16) or round-down "no_rnd" (+15), shifted
 * back, and clipped through 'cm' — presumably the ff_cropTbl clip table that
 * the QPEL_MC body (defined above, outside this view) has in scope.
 * op_avg additionally averages the clipped value with the existing pixel. */
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
/* Instantiate the full quarter-pel MC function set for put, put_no_rnd
 * and avg variants. */
QPEL_MC(0, put_ , _ , op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_ , _ , op_avg)
//QPEL_MC(1, avg_no_rnd , _ , op_avg)
#undef op_avg
#undef op_avg_no_rnd
#undef op_put
#undef op_put_no_rnd
  744. static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
  745. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  746. do{
  747. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  748. uint8_t *s = src;
  749. src_1 = s[-1];
  750. src0 = *s++;
  751. src1 = *s++;
  752. src2 = *s++;
  753. dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
  754. src3 = *s++;
  755. dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
  756. src4 = *s++;
  757. dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
  758. src5 = *s++;
  759. dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
  760. src6 = *s++;
  761. dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
  762. src7 = *s++;
  763. dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
  764. src8 = *s++;
  765. dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
  766. src9 = *s++;
  767. dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
  768. dst+=dstStride;
  769. src+=srcStride;
  770. }while(--h);
  771. }
  772. static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
  773. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  774. do{
  775. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  776. uint8_t *s = src,*d = dst;
  777. src_1 = *(s-srcStride);
  778. src0 = *s; s+=srcStride;
  779. src1 = *s; s+=srcStride;
  780. src2 = *s; s+=srcStride;
  781. *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
  782. src3 = *s; s+=srcStride;
  783. *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
  784. src4 = *s; s+=srcStride;
  785. *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
  786. src5 = *s; s+=srcStride;
  787. *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
  788. src6 = *s; s+=srcStride;
  789. *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
  790. src7 = *s; s+=srcStride;
  791. *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
  792. src8 = *s; s+=srcStride;
  793. *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
  794. src9 = *s;
  795. *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
  796. src++;
  797. dst++;
  798. }while(--w);
  799. }
/* mspel position (0,0): full-pel — straight 8x8 block copy, no filtering. */
static void put_mspel8_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels8_c(dst, src, stride, 8);
}
  803. static void put_mspel8_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){
  804. uint8_t half[64];
  805. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  806. put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
  807. }
/* mspel position (2,0): horizontal half-pel — filter written directly to dst. */
static void put_mspel8_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}
  811. static void put_mspel8_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){
  812. uint8_t half[64];
  813. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  814. put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
  815. }
/* mspel position (0,2): vertical half-pel — filter written directly to dst. */
static void put_mspel8_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}
  819. static void put_mspel8_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){
  820. uint8_t halfH[88];
  821. uint8_t halfV[64];
  822. uint8_t halfHV[64];
  823. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  824. wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
  825. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  826. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  827. }
  828. static void put_mspel8_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){
  829. uint8_t halfH[88];
  830. uint8_t halfV[64];
  831. uint8_t halfHV[64];
  832. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  833. wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
  834. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  835. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  836. }
  837. static void put_mspel8_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){
  838. uint8_t halfH[88];
  839. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  840. wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
  841. }