/*
 * This is optimized for SH, which has post-increment addressing (*p++).
 * On some CPUs, indexed addressing (p[n]) may be faster than
 * post-increment (*p++).
 *
 * copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <assert.h> /* for the assert() checks in H264_CHROMA_MC */
#include "libavutil/common.h"
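
/*
 * PIXOP2 instantiates 32-bit-at-a-time averaging primitives for "put" and
 * "avg".  The _aligned variants assume all sources are 32-bit aligned,
 * _aligned2 assumes only src2 is aligned, and _aligned1 swaps the source
 * arguments of _aligned2.  The _l2 variants average two sources; the _l4
 * variants average four via UNPACK/rnd_PACK.  LP/LPC (aligned 32-bit
 * store/load), AV_RN32, UNPACK and the PACK macros are assumed to be
 * provided by the file that #includes this template.
 */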
#define PIXOP2(OPNAME, OP) \
\
static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
        OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        OP(LP(dst+8),no_rnd_avg32(AV_RN32(src1+8),LPC(src2+8)) ); \
        OP(LP(dst+12),no_rnd_avg32(AV_RN32(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
        OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        OP(LP(dst+8),rnd_avg32(AV_RN32(src1+8),LPC(src2+8)) ); \
        OP(LP(dst+12),rnd_avg32(AV_RN32(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do { /* only src2 aligned */\
        OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
        OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LPC(src2 )) ); \
        OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),no_rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
        OP(LP(dst+4),no_rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
        OP(LP(dst+4),rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),no_rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
        OP(LP(dst+4),no_rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        OP(LP(dst+8),no_rnd_avg32(LPC(src1+8),LPC(src2+8)) ); \
        OP(LP(dst+12),no_rnd_avg32(LPC(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst ),rnd_avg32(LPC(src1 ),LPC(src2 )) ); \
        OP(LP(dst+4),rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        OP(LP(dst+8),rnd_avg32(LPC(src1+8),LPC(src2+8)) ); \
        OP(LP(dst+12),rnd_avg32(LPC(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; /* only src1 is unaligned */\
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { /* src1 is unaligned */\
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
}

#define op_avg(a, b) a = rnd_avg32(a,b)
#define op_put(a, b) a = b
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put

#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
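
/*
 * Bilinear interpolation for MPEG-4 global motion compensation: x16/y16 give
 * the fractional position in 1/16 pel, so the weights A..D sum to 256 and
 * the result is renormalized with ">> 8" after adding the rounder.
 */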
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
{
    const int A=(16-x16)*(16-y16);
    const int B=( x16)*(16-y16);
    const int C=(16-x16)*( y16);
    const int D=( x16)*( y16);
    do {
        int t0,t1,t2,t3;
        uint8_t *s0 = src;
        uint8_t *s1 = src+stride;
        t0 = *s0++; t2 = *s1++;
        t1 = *s0++; t3 = *s1++;
        dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        t1 = *s0++; t3 = *s1++;
        dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        t1 = *s0++; t3 = *s1++;
        dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        t1 = *s0++; t3 = *s1++;
        dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        dst+= stride;
        src+= stride;
    }while(--h);
}
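
/*
 * H.264 chroma MC: bilinear filtering at 1/8-pel precision.  The weights
 * A..D sum to 64, hence the "(b + 32) >> 6" renormalization in the
 * op_put/op_avg macros that instantiate this template below.
 */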
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_sh4(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}

#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
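
/*
 * MPEG-4 quarter-pel MC: the half-pel lowpass uses the 8-tap filter
 * (-1, 3, -6, 20, 20, -6, 3, -1), renormalized by the OP macros below with
 * "(b + 16) >> 5" (or "+ 15" for the no-rounding variants).  The qpelW_mcXY
 * dispatchers combine the horizontal/vertical results for quarter-pel
 * position (X/4, Y/4).
 */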
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    do {\
        uint8_t *s = src; \
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        src0= *s++;\
        src1= *s++;\
        src2= *s++;\
        src3= *s++;\
        src4= *s++;\
        OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
        src5= *s++;\
        OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
        src6= *s++;\
        OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
        src7= *s++;\
        OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
        src8= *s++;\
        OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
        OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
        OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
        OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int w=8;\
    do{\
        uint8_t *s = src, *d=dst;\
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        src0 = *s; s+=srcStride; \
        src1 = *s; s+=srcStride; \
        src2 = *s; s+=srcStride; \
        src3 = *s; s+=srcStride; \
        src4 = *s; s+=srcStride; \
        OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
        src5 = *s; s+=srcStride; \
        OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
        src6 = *s; s+=srcStride; \
        OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
        src7 = *s; s+=srcStride; \
        OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
        src8 = *s; \
        OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
        OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
        OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
        OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
        dst++;\
        src++;\
    }while(--w);\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    do {\
        uint8_t *s = src;\
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        int src9,src10,src11,src12,src13,src14,src15,src16;\
        src0= *s++;\
        src1= *s++;\
        src2= *s++;\
        src3= *s++;\
        src4= *s++;\
        OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
        src5= *s++;\
        OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
        src6= *s++;\
        OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
        src7= *s++;\
        OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
        src8= *s++;\
        OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
        src9= *s++;\
        OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
        src10= *s++;\
        OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
        src11= *s++;\
        OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
        src12= *s++;\
        OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
        src13= *s++;\
        OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
        src14= *s++;\
        OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
        src15= *s++;\
        OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
        src16= *s++;\
        OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
        OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
        OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
        OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int w=16;\
    do {\
        uint8_t *s = src, *d=dst;\
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        int src9,src10,src11,src12,src13,src14,src15,src16;\
        src0 = *s; s+=srcStride; \
        src1 = *s; s+=srcStride; \
        src2 = *s; s+=srcStride; \
        src3 = *s; s+=srcStride; \
        src4 = *s; s+=srcStride; \
        OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
        src5 = *s; s+=srcStride; \
        OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
        src6 = *s; s+=srcStride; \
        OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
        src7 = *s; s+=srcStride; \
        OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
        src8 = *s; s+=srcStride; \
        OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
        src9 = *s; s+=srcStride; \
        OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
        src10 = *s; s+=srcStride; \
        OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
        src11 = *s; s+=srcStride; \
        OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
        src12 = *s; s+=srcStride; \
        OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
        src13 = *s; s+=srcStride; \
        OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
        src14 = *s; s+=srcStride; \
        OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
        src15 = *s; s+=srcStride; \
        OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
        src16 = *s; \
        OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
        OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
        OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
        OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
        dst++;\
        src++;\
    }while(--w);\
}\
\
static void OPNAME ## qpel8_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_c(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    copy_block9(full, src, 16, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
}\
\
static void OPNAME ## qpel8_mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
}\
static void OPNAME ## qpel8_mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_c(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    copy_block17(full, src, 24, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
}\
\
static void OPNAME ## qpel16_mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
}\
static void OPNAME ## qpel16_mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}

#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
QPEL_MC(0, put_ , _ , op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_ , _ , op_avg)
//QPEL_MC(1, avg_no_rnd , _ , op_avg)
#undef op_avg
#undef op_avg_no_rnd
#undef op_put
#undef op_put_no_rnd
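
/*
 * WMV2 half-pel ("mspel") lowpass: 4-tap filter (-1, 9, 9, -1) with
 * "(... + 8) >> 4" rounding, applied horizontally and/or vertically by the
 * put_mspel8_mcXY dispatchers below.
 */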
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    do{
        int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
        uint8_t *s = src;
        src_1 = s[-1];
        src0 = *s++;
        src1 = *s++;
        src2 = *s++;
        dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
        src3 = *s++;
        dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
        src4 = *s++;
        dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
        src5 = *s++;
        dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
        src6 = *s++;
        dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
        src7 = *s++;
        dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
        src8 = *s++;
        dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
        src9 = *s++;
        dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
        dst+=dstStride;
        src+=srcStride;
    }while(--h);
}

static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    do{
        int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
        uint8_t *s = src,*d = dst;
        src_1 = *(s-srcStride);
        src0 = *s; s+=srcStride;
        src1 = *s; s+=srcStride;
        src2 = *s; s+=srcStride;
        *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
        src3 = *s; s+=srcStride;
        *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
        src4 = *s; s+=srcStride;
        *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
        src5 = *s; s+=srcStride;
        *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
        src6 = *s; s+=srcStride;
        *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
        src7 = *s; s+=srcStride;
        *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
        src8 = *s; s+=srcStride;
        *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
        src9 = *s;
        *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
        src++;
        dst++;
    }while(--w);
}

static void put_mspel8_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels8_c(dst, src, stride, 8);
}

static void put_mspel8_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t half[64];
    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
}

static void put_mspel8_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t half[64];
    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
}

static void put_mspel8_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];
    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
    put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];
    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
    put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];
    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
}