You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1650 lines
67KB

/*
this is optimized for SH, which has post-increment addressing (*p++);
on some CPUs indexed addressing (p[n]) may be faster than post-increment (*p++)
*/
  5. #define LD(adr) *(uint32_t*)(adr)
  6. #define PIXOP2(OPNAME, OP) \
  7. /*static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  8. {\
  9. do {\
  10. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  11. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  12. src1+=src_stride1; \
  13. src2+=src_stride2; \
  14. dst+=dst_stride; \
  15. } while(--h); \
  16. }\
  17. \
  18. static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  19. {\
  20. do {\
  21. OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  22. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  23. src1+=src_stride1; \
  24. src2+=src_stride2; \
  25. dst+=dst_stride; \
  26. } while(--h); \
  27. }\
  28. \
  29. static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  30. {\
  31. do {\
  32. OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  33. src1+=src_stride1; \
  34. src2+=src_stride2; \
  35. dst+=dst_stride; \
  36. } while(--h); \
  37. }\
  38. \
  39. static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  40. {\
  41. do {\
  42. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  43. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  44. OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
  45. OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
  46. src1+=src_stride1; \
  47. src2+=src_stride2; \
  48. dst+=dst_stride; \
  49. } while(--h); \
  50. }\
  51. \
  52. static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  53. {\
  54. do {\
  55. OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  56. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  57. OP(LP(dst+8),rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
  58. OP(LP(dst+12),rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
  59. src1+=src_stride1; \
  60. src2+=src_stride2; \
  61. dst+=dst_stride; \
  62. } while(--h); \
  63. }*/\
  64. \
  65. static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  66. {\
  67. do {\
  68. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  69. src1+=src_stride1; \
  70. src2+=src_stride2; \
  71. dst+=dst_stride; \
  72. } while(--h); \
  73. }\
  74. \
  75. static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  76. {\
  77. do {\
  78. OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  79. src1+=src_stride1; \
  80. src2+=src_stride2; \
  81. dst+=dst_stride; \
  82. } while(--h); \
  83. }\
  84. \
  85. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  86. {\
  87. do {\
  88. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  89. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  90. OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
  91. OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
  92. src1+=src_stride1; \
  93. src2+=src_stride2; \
  94. dst+=dst_stride; \
  95. } while(--h); \
  96. }\
  97. \
  98. static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  99. {\
  100. do {\
  101. OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  102. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  103. OP(LP(dst+8),rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
  104. OP(LP(dst+12),rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
  105. src1+=src_stride1; \
  106. src2+=src_stride2; \
  107. dst+=dst_stride; \
  108. } while(--h); \
  109. }\
  110. \
  111. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  112. {\
  113. do { /* onlye src2 aligned */\
  114. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  115. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  116. src1+=src_stride1; \
  117. src2+=src_stride2; \
  118. dst+=dst_stride; \
  119. } while(--h); \
  120. }\
  121. \
  122. static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  123. {\
  124. do {\
  125. OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  126. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  127. src1+=src_stride1; \
  128. src2+=src_stride2; \
  129. dst+=dst_stride; \
  130. } while(--h); \
  131. }\
  132. \
  133. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  134. {\
  135. do {\
  136. OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
  137. OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  138. src1+=src_stride1; \
  139. src2+=src_stride2; \
  140. dst+=dst_stride; \
  141. } while(--h); \
  142. }\
  143. \
  144. static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  145. {\
  146. do {\
  147. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  148. OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  149. src1+=src_stride1; \
  150. src2+=src_stride2; \
  151. dst+=dst_stride; \
  152. } while(--h); \
  153. }\
  154. \
  155. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  156. {\
  157. do {\
  158. OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
  159. OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  160. OP(LP(dst+8),no_rnd_avg32(LP(src1+8),LP(src2+8)) ); \
  161. OP(LP(dst+12),no_rnd_avg32(LP(src1+12),LP(src2+12)) ); \
  162. src1+=src_stride1; \
  163. src2+=src_stride2; \
  164. dst+=dst_stride; \
  165. } while(--h); \
  166. }\
  167. \
  168. static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  169. {\
  170. do {\
  171. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  172. OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  173. OP(LP(dst+8),rnd_avg32(LP(src1+8),LP(src2+8)) ); \
  174. OP(LP(dst+12),rnd_avg32(LP(src1+12),LP(src2+12)) ); \
  175. src1+=src_stride1; \
  176. src2+=src_stride2; \
  177. dst+=dst_stride; \
  178. } while(--h); \
  179. }\
  180. \
  181. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  182. { OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  183. \
  184. static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  185. { OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  186. \
  187. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  188. { OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  189. \
  190. static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  191. { OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  192. \
  193. static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  194. do { \
  195. uint32_t a0,a1,a2,a3; \
  196. UNPACK(a0,a1,LP(src1),LP(src2)); \
  197. UNPACK(a2,a3,LP(src3),LP(src4)); \
  198. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  199. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  200. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  201. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  202. src1+=src_stride1;\
  203. src2+=src_stride2;\
  204. src3+=src_stride3;\
  205. src4+=src_stride4;\
  206. dst+=dst_stride;\
  207. } while(--h); \
  208. } \
  209. \
  210. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  211. do { \
  212. uint32_t a0,a1,a2,a3; \
  213. UNPACK(a0,a1,LP(src1),LP(src2)); \
  214. UNPACK(a2,a3,LP(src3),LP(src4)); \
  215. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  216. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  217. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  218. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  219. src1+=src_stride1;\
  220. src2+=src_stride2;\
  221. src3+=src_stride3;\
  222. src4+=src_stride4;\
  223. dst+=dst_stride;\
  224. } while(--h); \
  225. } \
  226. \
  227. static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  228. do { \
  229. uint32_t a0,a1,a2,a3; /* src1 only not aligned */\
  230. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  231. UNPACK(a2,a3,LP(src3),LP(src4)); \
  232. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  233. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  234. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  235. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  236. src1+=src_stride1;\
  237. src2+=src_stride2;\
  238. src3+=src_stride3;\
  239. src4+=src_stride4;\
  240. dst+=dst_stride;\
  241. } while(--h); \
  242. } \
  243. \
  244. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  245. do { \
  246. uint32_t a0,a1,a2,a3; \
  247. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  248. UNPACK(a2,a3,LP(src3),LP(src4)); \
  249. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  250. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  251. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  252. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  253. src1+=src_stride1;\
  254. src2+=src_stride2;\
  255. src3+=src_stride3;\
  256. src4+=src_stride4;\
  257. dst+=dst_stride;\
  258. } while(--h); \
  259. } \
  260. \
  261. static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  262. do { \
  263. uint32_t a0,a1,a2,a3; \
  264. UNPACK(a0,a1,LP(src1),LP(src2)); \
  265. UNPACK(a2,a3,LP(src3),LP(src4)); \
  266. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  267. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  268. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  269. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  270. UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
  271. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  272. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  273. UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
  274. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  275. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  276. src1+=src_stride1;\
  277. src2+=src_stride2;\
  278. src3+=src_stride3;\
  279. src4+=src_stride4;\
  280. dst+=dst_stride;\
  281. } while(--h); \
  282. } \
  283. \
  284. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  285. do { \
  286. uint32_t a0,a1,a2,a3; \
  287. UNPACK(a0,a1,LP(src1),LP(src2)); \
  288. UNPACK(a2,a3,LP(src3),LP(src4)); \
  289. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  290. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  291. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  292. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  293. UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
  294. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  295. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  296. UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
  297. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  298. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  299. src1+=src_stride1;\
  300. src2+=src_stride2;\
  301. src3+=src_stride3;\
  302. src4+=src_stride4;\
  303. dst+=dst_stride;\
  304. } while(--h); \
  305. } \
  306. \
  307. static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  308. do { /* src1 is unaligned */\
  309. uint32_t a0,a1,a2,a3; \
  310. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  311. UNPACK(a2,a3,LP(src3),LP(src4)); \
  312. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  313. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  314. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  315. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  316. UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
  317. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  318. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  319. UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
  320. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  321. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  322. src1+=src_stride1;\
  323. src2+=src_stride2;\
  324. src3+=src_stride3;\
  325. src4+=src_stride4;\
  326. dst+=dst_stride;\
  327. } while(--h); \
  328. } \
  329. \
  330. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  331. do { \
  332. uint32_t a0,a1,a2,a3; \
  333. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  334. UNPACK(a2,a3,LP(src3),LP(src4)); \
  335. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  336. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  337. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  338. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  339. UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
  340. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  341. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  342. UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
  343. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  344. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  345. src1+=src_stride1;\
  346. src2+=src_stride2;\
  347. src3+=src_stride3;\
  348. src4+=src_stride4;\
  349. dst+=dst_stride;\
  350. } while(--h); \
  351. } \
  352. \
/* Instantiate the PIXOP2 helpers twice:
 *   avg_* : OP averages the computed value with what is already in dst
 *   put_* : OP simply stores the computed value */
#define op_avg(a, b) a = rnd_avg32(a,b)
#define op_put(a, b) a = b
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
/* Scalar rounding averages (round half up) used by the pixel helpers. */
#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
  361. static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
  362. {
  363. const int A=(16-x16)*(16-y16);
  364. const int B=( x16)*(16-y16);
  365. const int C=(16-x16)*( y16);
  366. const int D=( x16)*( y16);
  367. do {
  368. int t0,t1,t2,t3;
  369. uint8_t *s0 = src;
  370. uint8_t *s1 = src+stride;
  371. t0 = *s0++; t2 = *s1++;
  372. t1 = *s0++; t3 = *s1++;
  373. dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  374. t0 = *s0++; t2 = *s1++;
  375. dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  376. t1 = *s0++; t3 = *s1++;
  377. dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  378. t0 = *s0++; t2 = *s1++;
  379. dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  380. t1 = *s0++; t3 = *s1++;
  381. dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  382. t0 = *s0++; t2 = *s1++;
  383. dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  384. t1 = *s0++; t3 = *s1++;
  385. dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  386. t0 = *s0++; t2 = *s1++;
  387. dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  388. dst+= stride;
  389. src+= stride;
  390. }while(--h);
  391. }
/*
 * Global motion compensation over an 8-pixel-wide block with an affine
 * vector field: (vx,vy) advance by (dxx,dyx) per x step and (ox,oy) by
 * (dxy,dyy) per y step.  s = 1<<shift is the sub-pel precision, r the
 * rounder added before the final >>(shift*2).  Source coordinates that
 * fall outside [0,width]x[0,height] are clamped edge-wise via clip()
 * (defined elsewhere), degrading to 1-D or nearest-pixel interpolation.
 * NOTE(review): frac_x/frac_y are taken from (v>>16)&(s-1), i.e. before
 * the final >>shift of the integer part — looks intentional (matches the
 * generic C implementation) but confirm against dsputil.c if touched.
 */
static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
int y, vx, vy;
const int s= 1<<shift;
width--;   /* last valid column, for the (unsigned) range check below */
height--;  /* last valid row */
for(y=0; y<h; y++){
int x;
vx= ox;
vy= oy;
for(x=0; x<8; x++){ //XXX FIXME optimize
int src_x, src_y, frac_x, frac_y, index;
src_x= vx>>16;
src_y= vy>>16;
frac_x= src_x&(s-1);
frac_y= src_y&(s-1);
src_x>>=shift;
src_y>>=shift;
if((unsigned)src_x < width){
if((unsigned)src_y < height){
/* fully inside the source: 2x2 bilinear interpolation */
index= src_x + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*(s-frac_y)
+ ( src[index+stride ]*(s-frac_x)
+ src[index+stride+1]* frac_x )* frac_y
+ r)>>(shift*2);
}else{
/* y clamped to the edge: horizontal interpolation only */
index= src_x + clip(src_y, 0, height)*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*s
+ r)>>(shift*2);
}
}else{
if((unsigned)src_y < height){
/* x clamped to the edge: vertical interpolation only */
index= clip(src_x, 0, width) + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
+ src[index+stride ]* frac_y )*s
+ r)>>(shift*2);
}else{
/* both clamped: nearest edge pixel, no interpolation */
index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
dst[y*stride + x]= src[index ];
}
}
vx+= dxx;
vy+= dyx;
}
ox += dxy;
oy += dyy;
}
}
#define H264_CHROMA_MC(OPNAME, OP)\
/* H.264 chroma motion compensation, 2/4/8-pixel-wide variants.            \
 * A..D are the 2x2 bilinear weights (they sum to 64); OP (op_put/op_avg   \
 * defined just below) folds in the +32 rounding and >>6 shift.            \
 * t0..t3 ping-pong between the left and right taps of the two source      \
 * rows so each new output pixel needs only one post-increment load per    \
 * row (SH-friendly, per the file-top note). */\
static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}
/* Chroma MC rounding: weights sum to 64, so finish with (+32)>>6.
 * avg additionally rounds the average with the existing dst value. */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
  531. /* not yet optimized */
  532. static inline void copy_block4(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  533. {
  534. int i;
  535. for(i=0; i<h; i++)
  536. {
  537. ST32(dst , LD32(src ));
  538. dst+=dstStride;
  539. src+=srcStride;
  540. }
  541. }
  542. static inline void copy_block8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  543. {
  544. int i;
  545. for(i=0; i<h; i++)
  546. {
  547. ST32(dst , LD32(src ));
  548. ST32(dst+4 , LD32(src+4 ));
  549. dst+=dstStride;
  550. src+=srcStride;
  551. }
  552. }
  553. static inline void copy_block16(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  554. {
  555. int i;
  556. for(i=0; i<h; i++)
  557. {
  558. ST32(dst , LD32(src ));
  559. ST32(dst+4 , LD32(src+4 ));
  560. ST32(dst+8 , LD32(src+8 ));
  561. ST32(dst+12, LD32(src+12));
  562. dst+=dstStride;
  563. src+=srcStride;
  564. }
  565. }
  566. static inline void copy_block17(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  567. {
  568. int i;
  569. for(i=0; i<h; i++)
  570. {
  571. ST32(dst , LD32(src ));
  572. ST32(dst+4 , LD32(src+4 ));
  573. ST32(dst+8 , LD32(src+8 ));
  574. ST32(dst+12, LD32(src+12));
  575. dst[16]= src[16];
  576. dst+=dstStride;
  577. src+=srcStride;
  578. }
  579. }
  580. static inline void copy_block9(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  581. {
  582. int i;
  583. for(i=0; i<h; i++)
  584. {
  585. ST32(dst , LD32(src ));
  586. ST32(dst+4 , LD32(src+4 ));
  587. dst[8]= src[8];
  588. dst+=dstStride;
  589. src+=srcStride;
  590. }
  591. }
  592. /* end not optimized */
  593. #define QPEL_MC(r, OPNAME, RND, OP) \
  594. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  595. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  596. do {\
  597. uint8_t *s = src; \
  598. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  599. src0= *s++;\
  600. src1= *s++;\
  601. src2= *s++;\
  602. src3= *s++;\
  603. src4= *s++;\
  604. OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
  605. src5= *s++;\
  606. OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
  607. src6= *s++;\
  608. OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
  609. src7= *s++;\
  610. OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
  611. src8= *s++;\
  612. OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
  613. OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
  614. OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
  615. OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  616. dst+=dstStride;\
  617. src+=srcStride;\
  618. }while(--h);\
  619. }\
  620. \
  621. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  622. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  623. int w=8;\
  624. do{\
  625. uint8_t *s = src, *d=dst;\
  626. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  627. src0 = *s; s+=srcStride; \
  628. src1 = *s; s+=srcStride; \
  629. src2 = *s; s+=srcStride; \
  630. src3 = *s; s+=srcStride; \
  631. src4 = *s; s+=srcStride; \
  632. OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
  633. src5 = *s; s+=srcStride; \
  634. OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
  635. src6 = *s; s+=srcStride; \
  636. OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
  637. src7 = *s; s+=srcStride; \
  638. OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
  639. src8 = *s; \
  640. OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
  641. OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
  642. OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
  643. OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  644. dst++;\
  645. src++;\
  646. }while(--w);\
  647. }\
  648. \
  649. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  650. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  651. do {\
  652. uint8_t *s = src;\
  653. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  654. int src9,src10,src11,src12,src13,src14,src15,src16;\
  655. src0= *s++;\
  656. src1= *s++;\
  657. src2= *s++;\
  658. src3= *s++;\
  659. src4= *s++;\
  660. OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
  661. src5= *s++;\
  662. OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
  663. src6= *s++;\
  664. OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
  665. src7= *s++;\
  666. OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
  667. src8= *s++;\
  668. OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
  669. src9= *s++;\
  670. OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
  671. src10= *s++;\
  672. OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
  673. src11= *s++;\
  674. OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
  675. src12= *s++;\
  676. OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
  677. src13= *s++;\
  678. OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
  679. src14= *s++;\
  680. OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
  681. src15= *s++;\
  682. OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
  683. src16= *s++;\
  684. OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
  685. OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
  686. OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
  687. OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  688. dst+=dstStride;\
  689. src+=srcStride;\
  690. }while(--h);\
  691. }\
  692. \
  693. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  694. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  695. int w=16;\
  696. do {\
  697. uint8_t *s = src, *d=dst;\
  698. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  699. int src9,src10,src11,src12,src13,src14,src15,src16;\
  700. src0 = *s; s+=srcStride; \
  701. src1 = *s; s+=srcStride; \
  702. src2 = *s; s+=srcStride; \
  703. src3 = *s; s+=srcStride; \
  704. src4 = *s; s+=srcStride; \
  705. OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
  706. src5 = *s; s+=srcStride; \
  707. OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
  708. src6 = *s; s+=srcStride; \
  709. OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
  710. src7 = *s; s+=srcStride; \
  711. OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
  712. src8 = *s; s+=srcStride; \
  713. OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
  714. src9 = *s; s+=srcStride; \
  715. OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
  716. src10 = *s; s+=srcStride; \
  717. OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
  718. src11 = *s; s+=srcStride; \
  719. OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
  720. src12 = *s; s+=srcStride; \
  721. OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
  722. src13 = *s; s+=srcStride; \
  723. OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
  724. src14 = *s; s+=srcStride; \
  725. OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
  726. src15 = *s; s+=srcStride; \
  727. OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
  728. src16 = *s; \
  729. OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
  730. OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
  731. OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
  732. OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  733. dst++;\
  734. src++;\
  735. }while(--w);\
  736. }\
  737. \
/* MPEG-4 quarter-pel motion compensation, 8x8 blocks.                        */\
/* Naming: qpel8_mcXY_c appears to follow the dsputil convention where X/Y    */\
/* are the quarter-pel fractional MV components -- NOTE(review): confirm      */\
/* against the dispatch table that registers these functions.                 */\
/* Scratch buffers: 'full' is a 16-stride padded copy of the source (one      */\
/* extra row/column of filter support via copy_block9), 'halfH'/'halfV' are   */\
/* horizontally/vertically filtered half-pel planes, 'halfHV' is filtered in  */\
/* both directions; pixels8_l2_*/pixels8_l4_* average those planes down to    */\
/* the quarter-pel position.  The ff_*_old_c variants keep the original       */\
/* four-plane scheme and are unused by the new two-plane code paths.          */\
static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    /* integer-pel position: plain copy/average */ \
    OPNAME ## pixels8_c(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    /* exact horizontal half-pel: filter straight into dst */ \
    OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    /* src+1: blend with the pixel to the right for the 3/4 position */ \
    OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    copy_block9(full, src, 16, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
}\
\
static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    /* full+16: one row down for the 3/4 vertical position */ \
    OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    /* fold the source into halfH first, then only two planes are blended */ \
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    /* halfH+8: skip the first filtered row for the lower quarter position */ \
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    /* full+17 = full + one row (16) + one column (1) */ \
    OPNAME ## pixels8_l4_aligned0(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    /* center position: horizontal then vertical filtering, no blending */ \
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
/* MPEG-4 quarter-pel motion compensation, 16x16 blocks: the same scheme as  */\
/* the qpel8 set above scaled up -- padded copy 'full' is 24 wide x 17 high  */\
/* (copy_block17), half-pel planes are 16x16 (halfH has an extra row: 272    */\
/* bytes = 16x17).                                                           */\
static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    /* integer-pel position: plain copy/average */ \
    OPNAME ## pixels16_c(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    copy_block17(full, src, 24, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
}\
\
static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    /* full+24: one padded row down for the 3/4 vertical position */ \
    OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    /* fold the source into halfH first, then only two planes are blended */ \
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    /* halfH+16: skip one filtered row for the lower quarter position */ \
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    /* full+25 = full + one row (24) + one column (1) */ \
    OPNAME ## pixels16_l4_aligned0(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    /* center position: horizontal then vertical filtering, no blending */ \
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}
/* Pixel-store operators plugged into QPEL_MC.  'b' is the raw filter sum
 * (32x the pixel value); cm (cropTbl + MAX_NEG_CROP) clamps the >>5 result
 * to 0..255.  '+16' rounds to nearest, '+15' gives the no-rounding variant;
 * op_avg additionally averages with the existing dst pixel (with +1 round). */
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
/* Instantiate the full MPEG-4 qpel function set for put/put_no_rnd/avg.
 * The avg_no_rnd variant is intentionally disabled (kept for reference). */
QPEL_MC(0, put_ , _ , op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_ , _ , op_avg)
//QPEL_MC(1, avg_no_rnd , _ , op_avg)
#undef op_avg
#undef op_avg_no_rnd
#undef op_put
#undef op_put_no_rnd
  1114. #if 1
/* H264_LOWPASS(OPNAME, OP, OP2): generates the H.264 half-pel interpolation
 * primitives using the 6-tap (1,-5,20,20,-5,1) filter.  OP/OP2 are the
 * store operators supplied at instantiation (they handle rounding/clipping
 * via cm); the loads are interleaved with the filter arithmetic so SH-4
 * post-increment addressing (*s++) keeps the pipeline busy. */
#define H264_LOWPASS(OPNAME, OP, OP2) \
/* Horizontal 6-tap filter over a w x h block (w in {4,8,16}); each row     */\
/* keeps a sliding window of source pixels in registers (srcB..src18).      */\
static inline void OPNAME ## h264_qpel_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    do {\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2;\
        srcB = *s++;\
        srcA = *s++;\
        src0 = *s++;\
        src1 = *s++;\
        src2 = *s++;\
        src3 = *s++;\
        OP(dst[0], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        src4 = *s++;\
        OP(dst[1], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        src5 = *s++;\
        OP(dst[2], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        src6 = *s++;\
        OP(dst[3], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        if (w>4) { /* it optimized */ \
            int src7,src8,src9,src10; \
            src7 = *s++;\
            OP(dst[4], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
            src8 = *s++;\
            OP(dst[5], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
            src9 = *s++;\
            OP(dst[6], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
            src10 = *s++;\
            OP(dst[7], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
            if (w>8) { \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s++;\
                OP(dst[8] , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
                src12 = *s++;\
                OP(dst[9] , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
                src13 = *s++;\
                OP(dst[10], (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
                src14 = *s++;\
                OP(dst[11], (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
                src15 = *s++;\
                OP(dst[12], (src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
                src16 = *s++;\
                OP(dst[13], (src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
                src17 = *s++;\
                OP(dst[14], (src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
                src18 = *s++;\
                OP(dst[15], (src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
            } \
        } \
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
  1168. \
/* Vertical 6-tap filter: same sliding-window scheme as the horizontal      */\
/* version but walks one COLUMN per outer iteration (s steps by srcStride,  */\
/* d by dstStride, outer loop counts w columns).                            */\
static inline void OPNAME ## h264_qpel_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    do{\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2*srcStride,*d=dst;\
        srcB = *s; s+=srcStride;\
        srcA = *s; s+=srcStride;\
        src0 = *s; s+=srcStride;\
        src1 = *s; s+=srcStride;\
        src2 = *s; s+=srcStride;\
        src3 = *s; s+=srcStride;\
        OP(*d, (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));d+=dstStride;\
        src4 = *s; s+=srcStride;\
        OP(*d, (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));d+=dstStride;\
        src5 = *s; s+=srcStride;\
        OP(*d, (src2+src3)*20 - (src1+src4)*5 + (src0+src5));d+=dstStride;\
        src6 = *s; s+=srcStride;\
        OP(*d, (src3+src4)*20 - (src2+src5)*5 + (src1+src6));d+=dstStride;\
        if (h>4) { \
            int src7,src8,src9,src10; \
            src7 = *s; s+=srcStride;\
            OP(*d, (src4+src5)*20 - (src3+src6)*5 + (src2+src7));d+=dstStride;\
            src8 = *s; s+=srcStride;\
            OP(*d, (src5+src6)*20 - (src4+src7)*5 + (src3+src8));d+=dstStride;\
            src9 = *s; s+=srcStride;\
            OP(*d, (src6+src7)*20 - (src5+src8)*5 + (src4+src9));d+=dstStride;\
            src10 = *s; s+=srcStride;\
            OP(*d, (src7+src8)*20 - (src6+src9)*5 + (src5+src10));d+=dstStride;\
            if (h>8) { \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s; s+=srcStride;\
                OP(*d , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));d+=dstStride;\
                src12 = *s; s+=srcStride;\
                OP(*d , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));d+=dstStride;\
                src13 = *s; s+=srcStride;\
                OP(*d, (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));d+=dstStride;\
                src14 = *s; s+=srcStride;\
                OP(*d, (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));d+=dstStride;\
                src15 = *s; s+=srcStride;\
                OP(*d, (src12+src13)*20 - (src11+src14)*5 + (src10+src15));d+=dstStride;\
                src16 = *s; s+=srcStride;\
                OP(*d, (src13+src14)*20 - (src12+src15)*5 + (src11+src16));d+=dstStride;\
                src17 = *s; s+=srcStride;\
                OP(*d, (src14+src15)*20 - (src13+src16)*5 + (src12+src17));d+=dstStride;\
                src18 = *s; s+=srcStride;\
                OP(*d, (src15+src16)*20 - (src14+src17)*5 + (src13+src18));d+=dstStride;\
            } \
        } \
        dst++;\
        src++;\
    }while(--w);\
}\
  1221. \
/* Center (hv) position: pass 1 filters h+5 rows horizontally into the      */\
/* int16_t 'tmp' plane (keeping full precision, no clipping yet), pass 2    */\
/* filters tmp vertically and stores through OP2 (which does the final      */\
/* rounding/clipping at instantiation).                                     */\
static inline void OPNAME ## h264_qpel_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride,int w,int h){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    int i;\
    src -= 2*srcStride;\
    i= h+5; \
    do {\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2;\
        srcB = *s++;\
        srcA = *s++;\
        src0 = *s++;\
        src1 = *s++;\
        src2 = *s++;\
        src3 = *s++;\
        tmp[0] = ((src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        src4 = *s++;\
        tmp[1] = ((src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        src5 = *s++;\
        tmp[2] = ((src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        src6 = *s++;\
        tmp[3] = ((src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        if (w>4) { /* it optimized */ \
            int src7,src8,src9,src10; \
            src7 = *s++;\
            tmp[4] = ((src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
            src8 = *s++;\
            tmp[5] = ((src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
            src9 = *s++;\
            tmp[6] = ((src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
            src10 = *s++;\
            tmp[7] = ((src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
            if (w>8) { \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s++;\
                tmp[8] = ((src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
                src12 = *s++;\
                tmp[9] = ((src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
                src13 = *s++;\
                tmp[10] = ((src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
                src14 = *s++;\
                tmp[11] = ((src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
                src15 = *s++;\
                tmp[12] = ((src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
                src16 = *s++;\
                tmp[13] = ((src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
                src17 = *s++;\
                tmp[14] = ((src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
                src18 = *s++;\
                tmp[15] = ((src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
            } \
        } \
        tmp+=tmpStride;\
        src+=srcStride;\
    }while(--i);\
    /* pass 1 advanced tmp by (h+5) rows; rewinding by (h+5-2) leaves tmp   */\
    /* two rows in, so 's = tmp-2*tmpStride' below starts at the real top.  */\
    tmp -= tmpStride*(h+5-2);\
    i = w; \
    do {\
        int tmpB,tmpA,tmp0,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6;\
        int16_t *s = tmp-2*tmpStride; \
        uint8_t *d=dst;\
        tmpB = *s; s+=tmpStride;\
        tmpA = *s; s+=tmpStride;\
        tmp0 = *s; s+=tmpStride;\
        tmp1 = *s; s+=tmpStride;\
        tmp2 = *s; s+=tmpStride;\
        tmp3 = *s; s+=tmpStride;\
        OP2(*d, (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));d+=dstStride;\
        tmp4 = *s; s+=tmpStride;\
        OP2(*d, (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));d+=dstStride;\
        tmp5 = *s; s+=tmpStride;\
        OP2(*d, (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));d+=dstStride;\
        tmp6 = *s; s+=tmpStride;\
        OP2(*d, (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));d+=dstStride;\
        if (h>4) { \
            int tmp7,tmp8,tmp9,tmp10; \
            tmp7 = *s; s+=tmpStride;\
            OP2(*d, (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));d+=dstStride;\
            tmp8 = *s; s+=tmpStride;\
            OP2(*d, (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));d+=dstStride;\
            tmp9 = *s; s+=tmpStride;\
            OP2(*d, (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));d+=dstStride;\
            tmp10 = *s; s+=tmpStride;\
            OP2(*d, (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));d+=dstStride;\
            if (h>8) { \
                int tmp11,tmp12,tmp13,tmp14,tmp15,tmp16,tmp17,tmp18; \
                tmp11 = *s; s+=tmpStride;\
                OP2(*d , (tmp8 +tmp9 )*20 - (tmp7 +tmp10)*5 + (tmp6 +tmp11));d+=dstStride;\
                tmp12 = *s; s+=tmpStride;\
                OP2(*d , (tmp9 +tmp10)*20 - (tmp8 +tmp11)*5 + (tmp7 +tmp12));d+=dstStride;\
                tmp13 = *s; s+=tmpStride;\
                OP2(*d, (tmp10+tmp11)*20 - (tmp9 +tmp12)*5 + (tmp8 +tmp13));d+=dstStride;\
                tmp14 = *s; s+=tmpStride;\
                OP2(*d, (tmp11+tmp12)*20 - (tmp10+tmp13)*5 + (tmp9 +tmp14));d+=dstStride;\
                tmp15 = *s; s+=tmpStride;\
                OP2(*d, (tmp12+tmp13)*20 - (tmp11+tmp14)*5 + (tmp10+tmp15));d+=dstStride;\
                tmp16 = *s; s+=tmpStride;\
                OP2(*d, (tmp13+tmp14)*20 - (tmp12+tmp15)*5 + (tmp11+tmp16));d+=dstStride;\
                tmp17 = *s; s+=tmpStride;\
                OP2(*d, (tmp14+tmp15)*20 - (tmp13+tmp16)*5 + (tmp12+tmp17));d+=dstStride;\
                tmp18 = *s; s+=tmpStride;\
                OP2(*d, (tmp15+tmp16)*20 - (tmp14+tmp17)*5 + (tmp13+tmp18));d+=dstStride;\
            } \
        } \
        dst++;\
        tmp++;\
    }while(--i);\
}\
  1329. \
/* Fixed-size entry points: thin wrappers binding w=h=4/8/16 so the generic */\
/* kernels above can specialize on the w>4 / w>8 (h>4 / h>8) branches.      */\
static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
\
static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,16,16); \
}\
  1358. #define H264_MC(OPNAME, SIZE) \
  1359. static void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
  1360. OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
  1361. }\
  1362. \
  1363. static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
  1364. uint8_t half[SIZE*SIZE];\
  1365. put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
  1366. OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src, half, stride, stride, SIZE, SIZE);\
  1367. }\
  1368. \
  1369. static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
  1370. OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
  1371. }\
  1372. \
  1373. static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
  1374. uint8_t half[SIZE*SIZE];\
  1375. put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
  1376. OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src+1, half, stride, stride, SIZE, SIZE);\
  1377. }\
  1378. \
  1379. static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
  1380. uint8_t full[SIZE*(SIZE+5)];\
  1381. uint8_t * const full_mid= full + SIZE*2;\
  1382. uint8_t half[SIZE*SIZE];\
  1383. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1384. put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
  1385. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
  1386. }\
  1387. \
  1388. static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
  1389. uint8_t full[SIZE*(SIZE+5)];\
  1390. uint8_t * const full_mid= full + SIZE*2;\
  1391. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1392. OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
  1393. }\
  1394. \
  1395. static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
  1396. uint8_t full[SIZE*(SIZE+5)];\
  1397. uint8_t * const full_mid= full + SIZE*2;\
  1398. uint8_t half[SIZE*SIZE];\
  1399. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1400. put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
  1401. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
  1402. }\
  1403. \
  1404. static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
  1405. uint8_t full[SIZE*(SIZE+5)];\
  1406. uint8_t * const full_mid= full + SIZE*2;\
  1407. uint8_t halfH[SIZE*SIZE];\
  1408. uint8_t halfV[SIZE*SIZE];\
  1409. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1410. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1411. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1412. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1413. }\
  1414. \
  1415. static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
  1416. uint8_t full[SIZE*(SIZE+5)];\
  1417. uint8_t * const full_mid= full + SIZE*2;\
  1418. uint8_t halfH[SIZE*SIZE];\
  1419. uint8_t halfV[SIZE*SIZE];\
  1420. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1421. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1422. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1423. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1424. }\
  1425. \
  1426. static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
  1427. uint8_t full[SIZE*(SIZE+5)];\
  1428. uint8_t * const full_mid= full + SIZE*2;\
  1429. uint8_t halfH[SIZE*SIZE];\
  1430. uint8_t halfV[SIZE*SIZE];\
  1431. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1432. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1433. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1434. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1435. }\
  1436. \
  1437. static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
  1438. uint8_t full[SIZE*(SIZE+5)];\
  1439. uint8_t * const full_mid= full + SIZE*2;\
  1440. uint8_t halfH[SIZE*SIZE];\
  1441. uint8_t halfV[SIZE*SIZE];\
  1442. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1443. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1444. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1445. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1446. }\
  1447. \
  1448. static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
  1449. int16_t tmp[SIZE*(SIZE+5)];\
  1450. OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
  1451. }\
  1452. \
  1453. static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
  1454. int16_t tmp[SIZE*(SIZE+5)];\
  1455. uint8_t halfH[SIZE*SIZE];\
  1456. uint8_t halfHV[SIZE*SIZE];\
  1457. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1458. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1459. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
  1460. }\
  1461. \
  1462. static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
  1463. int16_t tmp[SIZE*(SIZE+5)];\
  1464. uint8_t halfH[SIZE*SIZE];\
  1465. uint8_t halfHV[SIZE*SIZE];\
  1466. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1467. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1468. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
  1469. }\
  1470. \
  1471. static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
  1472. uint8_t full[SIZE*(SIZE+5)];\
  1473. uint8_t * const full_mid= full + SIZE*2;\
  1474. int16_t tmp[SIZE*(SIZE+5)];\
  1475. uint8_t halfV[SIZE*SIZE];\
  1476. uint8_t halfHV[SIZE*SIZE];\
  1477. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1478. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1479. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1480. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
  1481. }\
  1482. \
  1483. static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
  1484. uint8_t full[SIZE*(SIZE+5)];\
  1485. uint8_t * const full_mid= full + SIZE*2;\
  1486. int16_t tmp[SIZE*(SIZE+5)];\
  1487. uint8_t halfV[SIZE*SIZE];\
  1488. uint8_t halfHV[SIZE*SIZE];\
  1489. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1490. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1491. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1492. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
  1493. }\
/* Pixel-store ops plugged into the H264_LOWPASS template:
 *   op_put/op_avg  — single-pass filter result: "+16 >> 5" rounds and
 *                    scales before clipping through the crop table cm[].
 *   op2_put/op2_avg — two-pass (h then v) result: "+512 >> 10".
 * The *_avg variants round-average the clipped value with the existing
 * destination pixel ((a+b+1)>>1). op_avg2 (weighted) is kept disabled. */
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
#define op2_put(a, b) a = cm[((b) + 512)>>10]
/* Instantiate the put/avg lowpass kernels, then the full set of
 * quarter-pel MC functions for 4x4, 8x8 and 16x16 blocks. */
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)
/* The ops are template parameters only; drop them after instantiation. */
#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
#endif
  1512. static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
  1513. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1514. do{
  1515. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  1516. uint8_t *s = src;
  1517. src_1 = s[-1];
  1518. src0 = *s++;
  1519. src1 = *s++;
  1520. src2 = *s++;
  1521. dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
  1522. src3 = *s++;
  1523. dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
  1524. src4 = *s++;
  1525. dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
  1526. src5 = *s++;
  1527. dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
  1528. src6 = *s++;
  1529. dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
  1530. src7 = *s++;
  1531. dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
  1532. src8 = *s++;
  1533. dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
  1534. src9 = *s++;
  1535. dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
  1536. dst+=dstStride;
  1537. src+=srcStride;
  1538. }while(--h);
  1539. }
  1540. static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
  1541. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1542. do{
  1543. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  1544. uint8_t *s = src,*d = dst;
  1545. src_1 = *(s-srcStride);
  1546. src0 = *s; s+=srcStride;
  1547. src1 = *s; s+=srcStride;
  1548. src2 = *s; s+=srcStride;
  1549. *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
  1550. src3 = *s; s+=srcStride;
  1551. *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
  1552. src4 = *s; s+=srcStride;
  1553. *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
  1554. src5 = *s; s+=srcStride;
  1555. *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
  1556. src6 = *s; s+=srcStride;
  1557. *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
  1558. src7 = *s; s+=srcStride;
  1559. *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
  1560. src8 = *s; s+=srcStride;
  1561. *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
  1562. src9 = *s;
  1563. *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
  1564. src++;
  1565. dst++;
  1566. }while(--w);
  1567. }
  1568. static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
  1569. put_pixels8_c(dst, src, stride, 8);
  1570. }
  1571. static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
  1572. uint8_t half[64];
  1573. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1574. put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
  1575. }
  1576. static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
  1577. wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
  1578. }
  1579. static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
  1580. uint8_t half[64];
  1581. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1582. put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
  1583. }
  1584. static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
  1585. wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
  1586. }
  1587. static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
  1588. uint8_t halfH[88];
  1589. uint8_t halfV[64];
  1590. uint8_t halfHV[64];
  1591. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1592. wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
  1593. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  1594. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  1595. }
  1596. static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
  1597. uint8_t halfH[88];
  1598. uint8_t halfV[64];
  1599. uint8_t halfHV[64];
  1600. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1601. wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
  1602. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  1603. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  1604. }
  1605. static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
  1606. uint8_t halfH[88];
  1607. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1608. wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
  1609. }