You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1668 lines
70KB

  1. /*
  2. * This is optimized for sh, which have post increment addressing (*p++).
  3. * Some CPU may be index (p[n]) faster than post increment (*p++).
  4. *
  5. * copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
/* Aligned 32-bit load from adr.
 * NOTE(review): the direct cast assumes adr is 32-bit aligned and formally
 * violates strict aliasing when adr points at uint8_t data; presumably the
 * build relies on -fno-strict-aliasing as the rest of this file uses the
 * same access style (LP/LD32/ST32) — confirm against the build flags. */
#define LD(adr) *(uint32_t*)(adr)
  24. #define PIXOP2(OPNAME, OP) \
  25. /*static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  26. {\
  27. do {\
  28. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  29. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  30. src1+=src_stride1; \
  31. src2+=src_stride2; \
  32. dst+=dst_stride; \
  33. } while(--h); \
  34. }\
  35. \
  36. static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  37. {\
  38. do {\
  39. OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  40. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  41. src1+=src_stride1; \
  42. src2+=src_stride2; \
  43. dst+=dst_stride; \
  44. } while(--h); \
  45. }\
  46. \
  47. static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  48. {\
  49. do {\
  50. OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  51. src1+=src_stride1; \
  52. src2+=src_stride2; \
  53. dst+=dst_stride; \
  54. } while(--h); \
  55. }\
  56. \
  57. static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  58. {\
  59. do {\
  60. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  61. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  62. OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
  63. OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
  64. src1+=src_stride1; \
  65. src2+=src_stride2; \
  66. dst+=dst_stride; \
  67. } while(--h); \
  68. }\
  69. \
  70. static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  71. {\
  72. do {\
  73. OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  74. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  75. OP(LP(dst+8),rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
  76. OP(LP(dst+12),rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
  77. src1+=src_stride1; \
  78. src2+=src_stride2; \
  79. dst+=dst_stride; \
  80. } while(--h); \
  81. }*/\
  82. \
  83. static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  84. {\
  85. do {\
  86. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  87. src1+=src_stride1; \
  88. src2+=src_stride2; \
  89. dst+=dst_stride; \
  90. } while(--h); \
  91. }\
  92. \
  93. static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  94. {\
  95. do {\
  96. OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  97. src1+=src_stride1; \
  98. src2+=src_stride2; \
  99. dst+=dst_stride; \
  100. } while(--h); \
  101. }\
  102. \
  103. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  104. {\
  105. do {\
  106. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  107. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  108. OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
  109. OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
  110. src1+=src_stride1; \
  111. src2+=src_stride2; \
  112. dst+=dst_stride; \
  113. } while(--h); \
  114. }\
  115. \
  116. static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  117. {\
  118. do {\
  119. OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  120. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  121. OP(LP(dst+8),rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
  122. OP(LP(dst+12),rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
  123. src1+=src_stride1; \
  124. src2+=src_stride2; \
  125. dst+=dst_stride; \
  126. } while(--h); \
  127. }\
  128. \
  129. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  130. {\
  131. do { /* onlye src2 aligned */\
  132. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  133. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  134. src1+=src_stride1; \
  135. src2+=src_stride2; \
  136. dst+=dst_stride; \
  137. } while(--h); \
  138. }\
  139. \
  140. static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  141. {\
  142. do {\
  143. OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  144. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  145. src1+=src_stride1; \
  146. src2+=src_stride2; \
  147. dst+=dst_stride; \
  148. } while(--h); \
  149. }\
  150. \
  151. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  152. {\
  153. do {\
  154. OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
  155. OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  156. src1+=src_stride1; \
  157. src2+=src_stride2; \
  158. dst+=dst_stride; \
  159. } while(--h); \
  160. }\
  161. \
  162. static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  163. {\
  164. do {\
  165. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  166. OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  167. src1+=src_stride1; \
  168. src2+=src_stride2; \
  169. dst+=dst_stride; \
  170. } while(--h); \
  171. }\
  172. \
  173. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  174. {\
  175. do {\
  176. OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
  177. OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  178. OP(LP(dst+8),no_rnd_avg32(LP(src1+8),LP(src2+8)) ); \
  179. OP(LP(dst+12),no_rnd_avg32(LP(src1+12),LP(src2+12)) ); \
  180. src1+=src_stride1; \
  181. src2+=src_stride2; \
  182. dst+=dst_stride; \
  183. } while(--h); \
  184. }\
  185. \
  186. static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  187. {\
  188. do {\
  189. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  190. OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  191. OP(LP(dst+8),rnd_avg32(LP(src1+8),LP(src2+8)) ); \
  192. OP(LP(dst+12),rnd_avg32(LP(src1+12),LP(src2+12)) ); \
  193. src1+=src_stride1; \
  194. src2+=src_stride2; \
  195. dst+=dst_stride; \
  196. } while(--h); \
  197. }\
  198. \
  199. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  200. { OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  201. \
  202. static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  203. { OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  204. \
  205. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  206. { OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  207. \
  208. static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  209. { OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  210. \
  211. static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  212. do { \
  213. uint32_t a0,a1,a2,a3; \
  214. UNPACK(a0,a1,LP(src1),LP(src2)); \
  215. UNPACK(a2,a3,LP(src3),LP(src4)); \
  216. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  217. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  218. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  219. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  220. src1+=src_stride1;\
  221. src2+=src_stride2;\
  222. src3+=src_stride3;\
  223. src4+=src_stride4;\
  224. dst+=dst_stride;\
  225. } while(--h); \
  226. } \
  227. \
  228. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  229. do { \
  230. uint32_t a0,a1,a2,a3; \
  231. UNPACK(a0,a1,LP(src1),LP(src2)); \
  232. UNPACK(a2,a3,LP(src3),LP(src4)); \
  233. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  234. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  235. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  236. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  237. src1+=src_stride1;\
  238. src2+=src_stride2;\
  239. src3+=src_stride3;\
  240. src4+=src_stride4;\
  241. dst+=dst_stride;\
  242. } while(--h); \
  243. } \
  244. \
  245. static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  246. do { \
  247. uint32_t a0,a1,a2,a3; /* src1 only not aligned */\
  248. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  249. UNPACK(a2,a3,LP(src3),LP(src4)); \
  250. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  251. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  252. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  253. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  254. src1+=src_stride1;\
  255. src2+=src_stride2;\
  256. src3+=src_stride3;\
  257. src4+=src_stride4;\
  258. dst+=dst_stride;\
  259. } while(--h); \
  260. } \
  261. \
  262. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  263. do { \
  264. uint32_t a0,a1,a2,a3; \
  265. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  266. UNPACK(a2,a3,LP(src3),LP(src4)); \
  267. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  268. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  269. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  270. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  271. src1+=src_stride1;\
  272. src2+=src_stride2;\
  273. src3+=src_stride3;\
  274. src4+=src_stride4;\
  275. dst+=dst_stride;\
  276. } while(--h); \
  277. } \
  278. \
  279. static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  280. do { \
  281. uint32_t a0,a1,a2,a3; \
  282. UNPACK(a0,a1,LP(src1),LP(src2)); \
  283. UNPACK(a2,a3,LP(src3),LP(src4)); \
  284. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  285. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  286. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  287. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  288. UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
  289. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  290. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  291. UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
  292. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  293. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  294. src1+=src_stride1;\
  295. src2+=src_stride2;\
  296. src3+=src_stride3;\
  297. src4+=src_stride4;\
  298. dst+=dst_stride;\
  299. } while(--h); \
  300. } \
  301. \
  302. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  303. do { \
  304. uint32_t a0,a1,a2,a3; \
  305. UNPACK(a0,a1,LP(src1),LP(src2)); \
  306. UNPACK(a2,a3,LP(src3),LP(src4)); \
  307. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  308. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  309. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  310. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  311. UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
  312. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  313. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  314. UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
  315. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  316. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  317. src1+=src_stride1;\
  318. src2+=src_stride2;\
  319. src3+=src_stride3;\
  320. src4+=src_stride4;\
  321. dst+=dst_stride;\
  322. } while(--h); \
  323. } \
  324. \
  325. static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  326. do { /* src1 is unaligned */\
  327. uint32_t a0,a1,a2,a3; \
  328. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  329. UNPACK(a2,a3,LP(src3),LP(src4)); \
  330. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  331. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  332. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  333. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  334. UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
  335. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  336. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  337. UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
  338. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  339. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  340. src1+=src_stride1;\
  341. src2+=src_stride2;\
  342. src3+=src_stride3;\
  343. src4+=src_stride4;\
  344. dst+=dst_stride;\
  345. } while(--h); \
  346. } \
  347. \
  348. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  349. do { \
  350. uint32_t a0,a1,a2,a3; \
  351. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  352. UNPACK(a2,a3,LP(src3),LP(src4)); \
  353. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  354. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  355. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  356. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  357. UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
  358. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  359. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  360. UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
  361. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  362. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  363. src1+=src_stride1;\
  364. src2+=src_stride2;\
  365. src3+=src_stride3;\
  366. src4+=src_stride4;\
  367. dst+=dst_stride;\
  368. } while(--h); \
  369. } \
  370. \
/* OPs plugged into PIXOP2: op_avg rounding-averages the new 32-bit word
 * into the destination, op_put overwrites it. */
#define op_avg(a, b) a = rnd_avg32(a,b)
#define op_put(a, b) a = b
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
/* Scalar rounding averages of two and four pixel values. */
#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
  379. static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
  380. {
  381. const int A=(16-x16)*(16-y16);
  382. const int B=( x16)*(16-y16);
  383. const int C=(16-x16)*( y16);
  384. const int D=( x16)*( y16);
  385. do {
  386. int t0,t1,t2,t3;
  387. uint8_t *s0 = src;
  388. uint8_t *s1 = src+stride;
  389. t0 = *s0++; t2 = *s1++;
  390. t1 = *s0++; t3 = *s1++;
  391. dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  392. t0 = *s0++; t2 = *s1++;
  393. dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  394. t1 = *s0++; t3 = *s1++;
  395. dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  396. t0 = *s0++; t2 = *s1++;
  397. dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  398. t1 = *s0++; t3 = *s1++;
  399. dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  400. t0 = *s0++; t2 = *s1++;
  401. dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  402. t1 = *s0++; t3 = *s1++;
  403. dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  404. t0 = *s0++; t2 = *s1++;
  405. dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  406. dst+= stride;
  407. src+= stride;
  408. }while(--h);
  409. }
  410. static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
  411. int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
  412. {
  413. int y, vx, vy;
  414. const int s= 1<<shift;
  415. width--;
  416. height--;
  417. for(y=0; y<h; y++){
  418. int x;
  419. vx= ox;
  420. vy= oy;
  421. for(x=0; x<8; x++){ //XXX FIXME optimize
  422. int src_x, src_y, frac_x, frac_y, index;
  423. src_x= vx>>16;
  424. src_y= vy>>16;
  425. frac_x= src_x&(s-1);
  426. frac_y= src_y&(s-1);
  427. src_x>>=shift;
  428. src_y>>=shift;
  429. if((unsigned)src_x < width){
  430. if((unsigned)src_y < height){
  431. index= src_x + src_y*stride;
  432. dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
  433. + src[index +1]* frac_x )*(s-frac_y)
  434. + ( src[index+stride ]*(s-frac_x)
  435. + src[index+stride+1]* frac_x )* frac_y
  436. + r)>>(shift*2);
  437. }else{
  438. index= src_x + clip(src_y, 0, height)*stride;
  439. dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
  440. + src[index +1]* frac_x )*s
  441. + r)>>(shift*2);
  442. }
  443. }else{
  444. if((unsigned)src_y < height){
  445. index= clip(src_x, 0, width) + src_y*stride;
  446. dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
  447. + src[index+stride ]* frac_y )*s
  448. + r)>>(shift*2);
  449. }else{
  450. index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
  451. dst[y*stride + x]= src[index ];
  452. }
  453. }
  454. vx+= dxx;
  455. vy+= dyx;
  456. }
  457. ox += dxy;
  458. oy += dyy;
  459. }
  460. }
/*
 * H264_CHROMA_MC(OPNAME, OP) generates the 2-, 4- and 8-pixel-wide H.264
 * chroma motion-compensation kernels.  Each output pixel is the bilinear
 * blend A*tl + B*tr + C*bl + D*br of its four source neighbours, with
 * weights derived from the 1/8-pel fractional offsets (x, y) (the weights
 * sum to 64).  OP() performs the final rounding/shift and either stores
 * (put) or averages (avg) the result.  The rotating t0..t3 temporaries let
 * every source byte be loaded exactly once via post-increment, per the
 * file-header note about SH CPUs.  All loops assume h >= 1 (do/while).
 */
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    \
    /* fractional offsets must lie inside the 8x8 sub-pel grid */\
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}
/* Final OPs for the chroma kernels: b carries the 6-bit-scaled bilinear
 * blend, so ((b) + 32) >> 6 is the rounded pixel value; op_avg then
 * rounding-averages it with the pixel already in the destination. */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
  549. /* not yet optimized */
  550. static inline void copy_block4(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  551. {
  552. int i;
  553. for(i=0; i<h; i++)
  554. {
  555. ST32(dst , LD32(src ));
  556. dst+=dstStride;
  557. src+=srcStride;
  558. }
  559. }
  560. static inline void copy_block8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  561. {
  562. int i;
  563. for(i=0; i<h; i++)
  564. {
  565. ST32(dst , LD32(src ));
  566. ST32(dst+4 , LD32(src+4 ));
  567. dst+=dstStride;
  568. src+=srcStride;
  569. }
  570. }
  571. static inline void copy_block16(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  572. {
  573. int i;
  574. for(i=0; i<h; i++)
  575. {
  576. ST32(dst , LD32(src ));
  577. ST32(dst+4 , LD32(src+4 ));
  578. ST32(dst+8 , LD32(src+8 ));
  579. ST32(dst+12, LD32(src+12));
  580. dst+=dstStride;
  581. src+=srcStride;
  582. }
  583. }
  584. static inline void copy_block17(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  585. {
  586. int i;
  587. for(i=0; i<h; i++)
  588. {
  589. ST32(dst , LD32(src ));
  590. ST32(dst+4 , LD32(src+4 ));
  591. ST32(dst+8 , LD32(src+8 ));
  592. ST32(dst+12, LD32(src+12));
  593. dst[16]= src[16];
  594. dst+=dstStride;
  595. src+=srcStride;
  596. }
  597. }
  598. static inline void copy_block9(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  599. {
  600. int i;
  601. for(i=0; i<h; i++)
  602. {
  603. ST32(dst , LD32(src ));
  604. ST32(dst+4 , LD32(src+4 ));
  605. dst[8]= src[8];
  606. dst+=dstStride;
  607. src+=srcStride;
  608. }
  609. }
  610. /* end not optimized */
  611. #define QPEL_MC(r, OPNAME, RND, OP) \
  612. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  613. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  614. do {\
  615. uint8_t *s = src; \
  616. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  617. src0= *s++;\
  618. src1= *s++;\
  619. src2= *s++;\
  620. src3= *s++;\
  621. src4= *s++;\
  622. OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
  623. src5= *s++;\
  624. OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
  625. src6= *s++;\
  626. OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
  627. src7= *s++;\
  628. OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
  629. src8= *s++;\
  630. OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
  631. OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
  632. OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
  633. OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  634. dst+=dstStride;\
  635. src+=srcStride;\
  636. }while(--h);\
  637. }\
  638. \
  639. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  640. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  641. int w=8;\
  642. do{\
  643. uint8_t *s = src, *d=dst;\
  644. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  645. src0 = *s; s+=srcStride; \
  646. src1 = *s; s+=srcStride; \
  647. src2 = *s; s+=srcStride; \
  648. src3 = *s; s+=srcStride; \
  649. src4 = *s; s+=srcStride; \
  650. OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
  651. src5 = *s; s+=srcStride; \
  652. OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
  653. src6 = *s; s+=srcStride; \
  654. OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
  655. src7 = *s; s+=srcStride; \
  656. OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
  657. src8 = *s; \
  658. OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
  659. OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
  660. OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
  661. OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  662. dst++;\
  663. src++;\
  664. }while(--w);\
  665. }\
  666. \
  667. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  668. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  669. do {\
  670. uint8_t *s = src;\
  671. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  672. int src9,src10,src11,src12,src13,src14,src15,src16;\
  673. src0= *s++;\
  674. src1= *s++;\
  675. src2= *s++;\
  676. src3= *s++;\
  677. src4= *s++;\
  678. OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
  679. src5= *s++;\
  680. OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
  681. src6= *s++;\
  682. OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
  683. src7= *s++;\
  684. OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
  685. src8= *s++;\
  686. OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
  687. src9= *s++;\
  688. OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
  689. src10= *s++;\
  690. OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
  691. src11= *s++;\
  692. OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
  693. src12= *s++;\
  694. OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
  695. src13= *s++;\
  696. OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
  697. src14= *s++;\
  698. OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
  699. src15= *s++;\
  700. OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
  701. src16= *s++;\
  702. OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
  703. OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
  704. OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
  705. OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  706. dst+=dstStride;\
  707. src+=srcStride;\
  708. }while(--h);\
  709. }\
  710. \
  711. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  712. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  713. int w=16;\
  714. do {\
  715. uint8_t *s = src, *d=dst;\
  716. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  717. int src9,src10,src11,src12,src13,src14,src15,src16;\
  718. src0 = *s; s+=srcStride; \
  719. src1 = *s; s+=srcStride; \
  720. src2 = *s; s+=srcStride; \
  721. src3 = *s; s+=srcStride; \
  722. src4 = *s; s+=srcStride; \
  723. OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
  724. src5 = *s; s+=srcStride; \
  725. OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
  726. src6 = *s; s+=srcStride; \
  727. OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
  728. src7 = *s; s+=srcStride; \
  729. OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
  730. src8 = *s; s+=srcStride; \
  731. OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
  732. src9 = *s; s+=srcStride; \
  733. OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
  734. src10 = *s; s+=srcStride; \
  735. OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
  736. src11 = *s; s+=srcStride; \
  737. OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
  738. src12 = *s; s+=srcStride; \
  739. OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
  740. src13 = *s; s+=srcStride; \
  741. OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
  742. src14 = *s; s+=srcStride; \
  743. OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
  744. src15 = *s; s+=srcStride; \
  745. OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
  746. src16 = *s; \
  747. OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
  748. OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
  749. OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
  750. OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  751. dst++;\
  752. src++;\
  753. }while(--w);\
  754. }\
  755. \
/* QPEL_MC body, 8x8 blocks: the purely horizontal/vertical quarter-pel    */ \
/* positions.  mcXY_c compensates a motion fraction of X/4 pel in x and    */ \
/* Y/4 pel in y; OPNAME (put_/avg_) and RND pick store-vs-average and the  */ \
/* rounding flavour of the helpers.                                        */ \
  756. static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
  757. OPNAME ## pixels8_c(dst, src, stride, 8);\
  758. }\
  759. \
/* mc10/mc30: average the h-lowpass half-pel row with the nearer integer   */ \
/* column (src vs src+1) via pixels8_l2_aligned2.                          */ \
  760. static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
  761. uint8_t half[64];\
  762. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
  763. OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
  764. }\
  765. \
  766. static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
  767. OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
  768. }\
  769. \
  770. static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
  771. uint8_t half[64];\
  772. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
  773. OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
  774. }\
  775. \
/* Vertical cases need 9 source rows; copy_block9 stages them into 'full'  */ \
/* with a fixed stride of 16 so the v-lowpass reads an aligned buffer.     */ \
  776. static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
  777. uint8_t full[16*9];\
  778. uint8_t half[64];\
  779. copy_block9(full, src, 16, stride, 9);\
  780. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
  781. OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
  782. }\
  783. \
  784. static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
  785. uint8_t full[16*9];\
  786. copy_block9(full, src, 16, stride, 9);\
  787. OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
  788. }\
  789. \
  790. static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
  791. uint8_t full[16*9];\
  792. uint8_t half[64];\
  793. copy_block9(full, src, 16, stride, 9);\
  794. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
  795. OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
  796. }\
/* Diagonal quarter-pel cases for 8x8.  The ff_*_old_c variants are the    */ \
/* straightforward reference versions: they build integer, halfH, halfV    */ \
/* and halfHV planes separately and blend all four with pixels8_l4*.       */ \
/* The active versions instead fold the integer column into halfH first    */ \
/* (pixels8_l2_aligned / _aligned1 for the x=3 column at full+1), then     */ \
/* run one v-lowpass and blend only two planes — one lowpass pass and one  */ \
/* buffer fewer.                                                           */ \
  797. static void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
  798. uint8_t full[16*9];\
  799. uint8_t halfH[72];\
  800. uint8_t halfV[64];\
  801. uint8_t halfHV[64];\
  802. copy_block9(full, src, 16, stride, 9);\
  803. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  804. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
  805. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  806. OPNAME ## pixels8_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
  807. }\
  808. static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
  809. uint8_t full[16*9];\
  810. uint8_t halfH[72];\
  811. uint8_t halfHV[64];\
  812. copy_block9(full, src, 16, stride, 9);\
  813. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  814. put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
  815. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  816. OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
  817. }\
  818. static void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
  819. uint8_t full[16*9];\
  820. uint8_t halfH[72];\
  821. uint8_t halfV[64];\
  822. uint8_t halfHV[64];\
  823. copy_block9(full, src, 16, stride, 9);\
  824. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  825. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
  826. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  827. OPNAME ## pixels8_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
  828. }\
  829. static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
  830. uint8_t full[16*9];\
  831. uint8_t halfH[72];\
  832. uint8_t halfHV[64];\
  833. copy_block9(full, src, 16, stride, 9);\
  834. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  835. put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
  836. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  837. OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
  838. }\
/* y=3 cases: same construction but offset one row (full+16 / halfH+8).    */ \
  839. static void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
  840. uint8_t full[16*9];\
  841. uint8_t halfH[72];\
  842. uint8_t halfV[64];\
  843. uint8_t halfHV[64];\
  844. copy_block9(full, src, 16, stride, 9);\
  845. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  846. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
  847. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  848. OPNAME ## pixels8_l4_aligned(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
  849. }\
  850. static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
  851. uint8_t full[16*9];\
  852. uint8_t halfH[72];\
  853. uint8_t halfHV[64];\
  854. copy_block9(full, src, 16, stride, 9);\
  855. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  856. put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
  857. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  858. OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
  859. }\
  860. static void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
  861. uint8_t full[16*9];\
  862. uint8_t halfH[72];\
  863. uint8_t halfV[64];\
  864. uint8_t halfHV[64];\
  865. copy_block9(full, src, 16, stride, 9);\
  866. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
  867. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
  868. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  869. OPNAME ## pixels8_l4_aligned0(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
  870. }\
  871. static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
  872. uint8_t full[16*9];\
  873. uint8_t halfH[72];\
  874. uint8_t halfHV[64];\
  875. copy_block9(full, src, 16, stride, 9);\
  876. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  877. put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
  878. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  879. OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
  880. }\
/* Centre-column 8x8 cases.  mc21/mc22/mc23: h-lowpass into halfH (9 rows),*/ \
/* then v-lowpass; mc21/mc23 additionally blend halfH (row 0 vs row 1      */ \
/* offset halfH+8) with the halfHV plane.  mc12/mc32: pre-average halfH    */ \
/* with the integer column (full, or full+1 for x=3) before the vertical   */ \
/* pass; the _old_c variants are the 4-plane reference implementations.    */ \
  881. static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
  882. uint8_t halfH[72];\
  883. uint8_t halfHV[64];\
  884. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
  885. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  886. OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
  887. }\
  888. static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
  889. uint8_t halfH[72];\
  890. uint8_t halfHV[64];\
  891. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
  892. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  893. OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
  894. }\
  895. static void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
  896. uint8_t full[16*9];\
  897. uint8_t halfH[72];\
  898. uint8_t halfV[64];\
  899. uint8_t halfHV[64];\
  900. copy_block9(full, src, 16, stride, 9);\
  901. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  902. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
  903. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  904. OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
  905. }\
  906. static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
  907. uint8_t full[16*9];\
  908. uint8_t halfH[72];\
  909. copy_block9(full, src, 16, stride, 9);\
  910. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  911. put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
  912. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
  913. }\
  914. static void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
  915. uint8_t full[16*9];\
  916. uint8_t halfH[72];\
  917. uint8_t halfV[64];\
  918. uint8_t halfHV[64];\
  919. copy_block9(full, src, 16, stride, 9);\
  920. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  921. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
  922. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  923. OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
  924. }\
  925. static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
  926. uint8_t full[16*9];\
  927. uint8_t halfH[72];\
  928. copy_block9(full, src, 16, stride, 9);\
  929. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  930. put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
  931. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
  932. }\
  933. static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
  934. uint8_t halfH[72];\
  935. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
  936. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
  937. }\
/* 16x16 counterparts of the mc00..mc03 cases above: identical structure,  */ \
/* with 17 staging rows copied at stride 24 (copy_block17) and 256-byte    */ \
/* half-pel planes.                                                        */ \
  938. static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
  939. OPNAME ## pixels16_c(dst, src, stride, 16);\
  940. }\
  941. \
  942. static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
  943. uint8_t half[256];\
  944. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
  945. OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
  946. }\
  947. \
  948. static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
  949. OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
  950. }\
  951. \
  952. static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
  953. uint8_t half[256];\
  954. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
  955. OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
  956. }\
  957. \
  958. static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
  959. uint8_t full[24*17];\
  960. uint8_t half[256];\
  961. copy_block17(full, src, 24, stride, 17);\
  962. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
  963. OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
  964. }\
  965. \
  966. static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
  967. uint8_t full[24*17];\
  968. copy_block17(full, src, 24, stride, 17);\
  969. OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
  970. }\
  971. \
  972. static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
  973. uint8_t full[24*17];\
  974. uint8_t half[256];\
  975. copy_block17(full, src, 24, stride, 17);\
  976. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
  977. OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
  978. }\
/* 16x16 diagonal quarter-pel cases; same old/new split as the 8x8         */ \
/* versions: ff_*_old_c blend four planes with pixels16_l4*, the active    */ \
/* versions fold the integer column into halfH first and blend two.        */ \
/* Offsets: +1 selects the x=3 column, +24 / halfH+16 the y=3 row.         */ \
  979. static void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
  980. uint8_t full[24*17];\
  981. uint8_t halfH[272];\
  982. uint8_t halfV[256];\
  983. uint8_t halfHV[256];\
  984. copy_block17(full, src, 24, stride, 17);\
  985. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  986. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
  987. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  988. OPNAME ## pixels16_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
  989. }\
  990. static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
  991. uint8_t full[24*17];\
  992. uint8_t halfH[272];\
  993. uint8_t halfHV[256];\
  994. copy_block17(full, src, 24, stride, 17);\
  995. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  996. put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
  997. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  998. OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
  999. }\
  1000. static void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
  1001. uint8_t full[24*17];\
  1002. uint8_t halfH[272];\
  1003. uint8_t halfV[256];\
  1004. uint8_t halfHV[256];\
  1005. copy_block17(full, src, 24, stride, 17);\
  1006. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1007. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
  1008. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1009. OPNAME ## pixels16_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
  1010. }\
  1011. static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
  1012. uint8_t full[24*17];\
  1013. uint8_t halfH[272];\
  1014. uint8_t halfHV[256];\
  1015. copy_block17(full, src, 24, stride, 17);\
  1016. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1017. put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
  1018. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1019. OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
  1020. }\
  1021. static void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
  1022. uint8_t full[24*17];\
  1023. uint8_t halfH[272];\
  1024. uint8_t halfV[256];\
  1025. uint8_t halfHV[256];\
  1026. copy_block17(full, src, 24, stride, 17);\
  1027. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1028. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
  1029. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1030. OPNAME ## pixels16_l4_aligned(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
  1031. }\
  1032. static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
  1033. uint8_t full[24*17];\
  1034. uint8_t halfH[272];\
  1035. uint8_t halfHV[256];\
  1036. copy_block17(full, src, 24, stride, 17);\
  1037. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1038. put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
  1039. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1040. OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
  1041. }\
  1042. static void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
  1043. uint8_t full[24*17];\
  1044. uint8_t halfH[272];\
  1045. uint8_t halfV[256];\
  1046. uint8_t halfHV[256];\
  1047. copy_block17(full, src, 24, stride, 17);\
  1048. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
  1049. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
  1050. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1051. OPNAME ## pixels16_l4_aligned0(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
  1052. }\
  1053. static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
  1054. uint8_t full[24*17];\
  1055. uint8_t halfH[272];\
  1056. uint8_t halfHV[256];\
  1057. copy_block17(full, src, 24, stride, 17);\
  1058. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1059. put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
  1060. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1061. OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
  1062. }\
/* Centre-column 16x16 cases (see the 8x8 versions above).  The final      */ \
/* line of this group also terminates the QPEL_MC macro body (no trailing  */ \
/* backslash).                                                             */ \
  1063. static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
  1064. uint8_t halfH[272];\
  1065. uint8_t halfHV[256];\
  1066. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
  1067. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1068. OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
  1069. }\
  1070. static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
  1071. uint8_t halfH[272];\
  1072. uint8_t halfHV[256];\
  1073. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
  1074. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1075. OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
  1076. }\
  1077. static void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
  1078. uint8_t full[24*17];\
  1079. uint8_t halfH[272];\
  1080. uint8_t halfV[256];\
  1081. uint8_t halfHV[256];\
  1082. copy_block17(full, src, 24, stride, 17);\
  1083. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1084. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
  1085. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1086. OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
  1087. }\
  1088. static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
  1089. uint8_t full[24*17];\
  1090. uint8_t halfH[272];\
  1091. copy_block17(full, src, 24, stride, 17);\
  1092. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1093. put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
  1094. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
  1095. }\
  1096. static void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
  1097. uint8_t full[24*17];\
  1098. uint8_t halfH[272];\
  1099. uint8_t halfV[256];\
  1100. uint8_t halfHV[256];\
  1101. copy_block17(full, src, 24, stride, 17);\
  1102. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1103. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
  1104. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1105. OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
  1106. }\
  1107. static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
  1108. uint8_t full[24*17];\
  1109. uint8_t halfH[272];\
  1110. copy_block17(full, src, 24, stride, 17);\
  1111. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1112. put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
  1113. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
  1114. }\
  1115. static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
  1116. uint8_t halfH[272];\
  1117. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
  1118. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
  1119. }
/* Store/round primitives plugged into QPEL_MC: 'b' is the raw lowpass
 * output (scaled by 32), cm = cropTbl + MAX_NEG_CROP clips to 0..255.
 * op_put* store the clipped value; op_avg* average it with the existing
 * dst pixel.  The no_rnd forms add 15 instead of 16 before the >>5 so
 * exact ties are biased downward. */
  1120. #define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
  1121. #define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
  1122. #define op_put(a, b) a = cm[((b) + 16)>>5]
  1123. #define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
/* Instantiate the put, put_no_rnd and avg families of all qpel MC
 * functions; the avg_no_rnd family is intentionally not generated. */
  1124. QPEL_MC(0, put_ , _ , op_put)
  1125. QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
  1126. QPEL_MC(0, avg_ , _ , op_avg)
  1127. //QPEL_MC(1, avg_no_rnd , _ , op_avg)
  1128. #undef op_avg
  1129. #undef op_avg_no_rnd
  1130. #undef op_put
  1131. #undef op_put_no_rnd
  1132. #if 1
/*
 * H264_LOWPASS: half-pel luma interpolation using the 6-tap filter with
 * coefficients (1,-5,20,20,-5,1), fully unrolled per row.  OP and OP2 are
 * supplied by the instantiation and perform the round/clip/store.  Width
 * is selected at run time by the nested if(w>4)/if(w>8) blocks, so the
 * same body serves 4-, 8- and 16-wide blocks.  The horizontal variant
 * below streams samples through a sliding window of locals (srcB..src18)
 * so each input byte is loaded exactly once per row.
 */
  1133. #define H264_LOWPASS(OPNAME, OP, OP2) \
  1134. static inline void OPNAME ## h264_qpel_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
  1135. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  1136. do {\
  1137. int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
/* filtering for dst[x] needs src[x-2..x+3], so start two samples left */ \
  1138. uint8_t *s = src-2;\
  1139. srcB = *s++;\
  1140. srcA = *s++;\
  1141. src0 = *s++;\
  1142. src1 = *s++;\
  1143. src2 = *s++;\
  1144. src3 = *s++;\
  1145. OP(dst[0], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
  1146. src4 = *s++;\
  1147. OP(dst[1], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
  1148. src5 = *s++;\
  1149. OP(dst[2], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
  1150. src6 = *s++;\
  1151. OP(dst[3], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
  1152. if (w>4) { /* it optimized */ \
  1153. int src7,src8,src9,src10; \
  1154. src7 = *s++;\
  1155. OP(dst[4], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
  1156. src8 = *s++;\
  1157. OP(dst[5], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
  1158. src9 = *s++;\
  1159. OP(dst[6], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
  1160. src10 = *s++;\
  1161. OP(dst[7], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
  1162. if (w>8) { \
  1163. int src11,src12,src13,src14,src15,src16,src17,src18; \
  1164. src11 = *s++;\
  1165. OP(dst[8] , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
  1166. src12 = *s++;\
  1167. OP(dst[9] , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
  1168. src13 = *s++;\
  1169. OP(dst[10], (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
  1170. src14 = *s++;\
  1171. OP(dst[11], (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
  1172. src15 = *s++;\
  1173. OP(dst[12], (src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
  1174. src16 = *s++;\
  1175. OP(dst[13], (src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
  1176. src17 = *s++;\
  1177. OP(dst[14], (src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
  1178. src18 = *s++;\
  1179. OP(dst[15], (src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
  1180. } \
  1181. } \
  1182. dst+=dstStride;\
  1183. src+=srcStride;\
  1184. }while(--h);\
  1185. }\
  1186. \
/* Vertical 6-tap: the same filter applied down each column.  The outer    */ \
/* loop walks w columns; the unrolled body emits up to 16 output rows,     */ \
/* with the block height chosen by the nested if(h>4)/if(h>8).             */ \
  1187. static inline void OPNAME ## h264_qpel_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
  1188. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  1189. do{\
  1190. int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
/* start two rows above: row y needs src rows y-2..y+3 */ \
  1191. uint8_t *s = src-2*srcStride,*d=dst;\
  1192. srcB = *s; s+=srcStride;\
  1193. srcA = *s; s+=srcStride;\
  1194. src0 = *s; s+=srcStride;\
  1195. src1 = *s; s+=srcStride;\
  1196. src2 = *s; s+=srcStride;\
  1197. src3 = *s; s+=srcStride;\
  1198. OP(*d, (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));d+=dstStride;\
  1199. src4 = *s; s+=srcStride;\
  1200. OP(*d, (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));d+=dstStride;\
  1201. src5 = *s; s+=srcStride;\
  1202. OP(*d, (src2+src3)*20 - (src1+src4)*5 + (src0+src5));d+=dstStride;\
  1203. src6 = *s; s+=srcStride;\
  1204. OP(*d, (src3+src4)*20 - (src2+src5)*5 + (src1+src6));d+=dstStride;\
  1205. if (h>4) { \
  1206. int src7,src8,src9,src10; \
  1207. src7 = *s; s+=srcStride;\
  1208. OP(*d, (src4+src5)*20 - (src3+src6)*5 + (src2+src7));d+=dstStride;\
  1209. src8 = *s; s+=srcStride;\
  1210. OP(*d, (src5+src6)*20 - (src4+src7)*5 + (src3+src8));d+=dstStride;\
  1211. src9 = *s; s+=srcStride;\
  1212. OP(*d, (src6+src7)*20 - (src5+src8)*5 + (src4+src9));d+=dstStride;\
  1213. src10 = *s; s+=srcStride;\
  1214. OP(*d, (src7+src8)*20 - (src6+src9)*5 + (src5+src10));d+=dstStride;\
  1215. if (h>8) { \
  1216. int src11,src12,src13,src14,src15,src16,src17,src18; \
  1217. src11 = *s; s+=srcStride;\
  1218. OP(*d , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));d+=dstStride;\
  1219. src12 = *s; s+=srcStride;\
  1220. OP(*d , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));d+=dstStride;\
  1221. src13 = *s; s+=srcStride;\
  1222. OP(*d, (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));d+=dstStride;\
  1223. src14 = *s; s+=srcStride;\
  1224. OP(*d, (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));d+=dstStride;\
  1225. src15 = *s; s+=srcStride;\
  1226. OP(*d, (src12+src13)*20 - (src11+src14)*5 + (src10+src15));d+=dstStride;\
  1227. src16 = *s; s+=srcStride;\
  1228. OP(*d, (src13+src14)*20 - (src12+src15)*5 + (src11+src16));d+=dstStride;\
  1229. src17 = *s; s+=srcStride;\
  1230. OP(*d, (src14+src15)*20 - (src13+src16)*5 + (src12+src17));d+=dstStride;\
  1231. src18 = *s; s+=srcStride;\
  1232. OP(*d, (src15+src16)*20 - (src14+src17)*5 + (src13+src18));d+=dstStride;\
  1233. } \
  1234. } \
  1235. dst++;\
  1236. src++;\
  1237. }while(--w);\
  1238. }\
  1239. \
/* Separable 2D half-pel (half-pel in both axes): first pass filters       */ \
/* horizontally into the int16 'tmp' plane for h+5 rows without clipping,  */ \
/* then 'tmp' is rewound and a vertical pass over it produces the output   */ \
/* through OP2 (supplied by the instantiation; presumably performs the     */ \
/* wider normalization for the double-filtered values — confirm at the     */ \
/* H264_LOWPASS invocation site).                                          */ \
  1240. static inline void OPNAME ## h264_qpel_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride,int w,int h){\
  1241. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  1242. int i;\
  1243. src -= 2*srcStride;\
  1244. i= h+5; \
  1245. do {\
  1246. int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
  1247. uint8_t *s = src-2;\
  1248. srcB = *s++;\
  1249. srcA = *s++;\
  1250. src0 = *s++;\
  1251. src1 = *s++;\
  1252. src2 = *s++;\
  1253. src3 = *s++;\
  1254. tmp[0] = ((src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
  1255. src4 = *s++;\
  1256. tmp[1] = ((src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
  1257. src5 = *s++;\
  1258. tmp[2] = ((src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
  1259. src6 = *s++;\
  1260. tmp[3] = ((src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
  1261. if (w>4) { /* it optimized */ \
  1262. int src7,src8,src9,src10; \
  1263. src7 = *s++;\
  1264. tmp[4] = ((src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
  1265. src8 = *s++;\
  1266. tmp[5] = ((src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
  1267. src9 = *s++;\
  1268. tmp[6] = ((src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
  1269. src10 = *s++;\
  1270. tmp[7] = ((src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
  1271. if (w>8) { \
  1272. int src11,src12,src13,src14,src15,src16,src17,src18; \
  1273. src11 = *s++;\
  1274. tmp[8] = ((src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
  1275. src12 = *s++;\
  1276. tmp[9] = ((src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
  1277. src13 = *s++;\
  1278. tmp[10] = ((src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
  1279. src14 = *s++;\
  1280. tmp[11] = ((src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
  1281. src15 = *s++;\
  1282. tmp[12] = ((src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
  1283. src16 = *s++;\
  1284. tmp[13] = ((src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
  1285. src17 = *s++;\
  1286. tmp[14] = ((src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
  1287. src18 = *s++;\
  1288. tmp[15] = ((src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
  1289. } \
  1290. } \
  1291. tmp+=tmpStride;\
  1292. src+=srcStride;\
  1293. }while(--i);\
/* rewind so that s = tmp-2*tmpStride below lands on the first temp row */ \
  1294. tmp -= tmpStride*(h+5-2);\
  1295. i = w; \
  1296. do {\
  1297. int tmpB,tmpA,tmp0,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6;\
  1298. int16_t *s = tmp-2*tmpStride; \
  1299. uint8_t *d=dst;\
  1300. tmpB = *s; s+=tmpStride;\
  1301. tmpA = *s; s+=tmpStride;\
  1302. tmp0 = *s; s+=tmpStride;\
  1303. tmp1 = *s; s+=tmpStride;\
  1304. tmp2 = *s; s+=tmpStride;\
  1305. tmp3 = *s; s+=tmpStride;\
  1306. OP2(*d, (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));d+=dstStride;\
  1307. tmp4 = *s; s+=tmpStride;\
  1308. OP2(*d, (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));d+=dstStride;\
  1309. tmp5 = *s; s+=tmpStride;\
  1310. OP2(*d, (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));d+=dstStride;\
  1311. tmp6 = *s; s+=tmpStride;\
  1312. OP2(*d, (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));d+=dstStride;\
  1313. if (h>4) { \
  1314. int tmp7,tmp8,tmp9,tmp10; \
  1315. tmp7 = *s; s+=tmpStride;\
  1316. OP2(*d, (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));d+=dstStride;\
  1317. tmp8 = *s; s+=tmpStride;\
  1318. OP2(*d, (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));d+=dstStride;\
  1319. tmp9 = *s; s+=tmpStride;\
  1320. OP2(*d, (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));d+=dstStride;\
  1321. tmp10 = *s; s+=tmpStride;\
  1322. OP2(*d, (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));d+=dstStride;\
  1323. if (h>8) { \
  1324. int tmp11,tmp12,tmp13,tmp14,tmp15,tmp16,tmp17,tmp18; \
  1325. tmp11 = *s; s+=tmpStride;\
  1326. OP2(*d , (tmp8 +tmp9 )*20 - (tmp7 +tmp10)*5 + (tmp6 +tmp11));d+=dstStride;\
  1327. tmp12 = *s; s+=tmpStride;\
  1328. OP2(*d , (tmp9 +tmp10)*20 - (tmp8 +tmp11)*5 + (tmp7 +tmp12));d+=dstStride;\
  1329. tmp13 = *s; s+=tmpStride;\
  1330. OP2(*d, (tmp10+tmp11)*20 - (tmp9 +tmp12)*5 + (tmp8 +tmp13));d+=dstStride;\
  1331. tmp14 = *s; s+=tmpStride;\
  1332. OP2(*d, (tmp11+tmp12)*20 - (tmp10+tmp13)*5 + (tmp9 +tmp14));d+=dstStride;\
  1333. tmp15 = *s; s+=tmpStride;\
  1334. OP2(*d, (tmp12+tmp13)*20 - (tmp11+tmp14)*5 + (tmp10+tmp15));d+=dstStride;\
  1335. tmp16 = *s; s+=tmpStride;\
  1336. OP2(*d, (tmp13+tmp14)*20 - (tmp12+tmp15)*5 + (tmp11+tmp16));d+=dstStride;\
  1337. tmp17 = *s; s+=tmpStride;\
  1338. OP2(*d, (tmp14+tmp15)*20 - (tmp13+tmp16)*5 + (tmp12+tmp17));d+=dstStride;\
  1339. tmp18 = *s; s+=tmpStride;\
  1340. OP2(*d, (tmp15+tmp16)*20 - (tmp14+tmp17)*5 + (tmp13+tmp18));d+=dstStride;\
  1341. } \
  1342. } \
  1343. dst++;\
  1344. tmp++;\
  1345. }while(--i);\
  1346. }\
  1347. \
/* Fixed-size entry points binding the generic lowpass kernels above to    */ \
/* w=h=4, 8 or 16.                                                         */ \
  1348. static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1349. OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,4,4); \
  1350. }\
  1351. static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1352. OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,8,8); \
  1353. }\
  1354. static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1355. OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,16,16); \
  1356. }\
  1357. \
  1358. static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1359. OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,4,4); \
  1360. }\
  1361. static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1362. OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,8,8); \
  1363. }\
  1364. static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1365. OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,16,16); \
  1366. }\
  1367. static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  1368. OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,4,4); \
  1369. }\
  1370. static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  1371. OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,8,8); \
  1372. }\
  1373. static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  1374. OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,16,16); \
  1375. }\
/*
 * H264_MC: per-position dispatchers built from the lowpass primitives
 * above for one block SIZE (the macro continues below this view).
 * Vertical/diagonal cases stage SIZE+5 rows in 'full' starting two rows
 * above the block (src - stride*2); full_mid points past that 2-row top
 * margin.  Quarter positions are formed by averaging half-pel planes
 * with the pixels*_l2_aligned* helpers.
 */
  1376. #define H264_MC(OPNAME, SIZE) \
  1377. static void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
  1378. OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
  1379. }\
  1380. \
  1381. static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
  1382. uint8_t half[SIZE*SIZE];\
  1383. put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
  1384. OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src, half, stride, stride, SIZE, SIZE);\
  1385. }\
  1386. \
  1387. static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
  1388. OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
  1389. }\
  1390. \
  1391. static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
  1392. uint8_t half[SIZE*SIZE];\
  1393. put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
  1394. OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src+1, half, stride, stride, SIZE, SIZE);\
  1395. }\
  1396. \
  1397. static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
  1398. uint8_t full[SIZE*(SIZE+5)];\
  1399. uint8_t * const full_mid= full + SIZE*2;\
  1400. uint8_t half[SIZE*SIZE];\
  1401. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1402. put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
  1403. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
  1404. }\
  1405. \
  1406. static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
  1407. uint8_t full[SIZE*(SIZE+5)];\
  1408. uint8_t * const full_mid= full + SIZE*2;\
  1409. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1410. OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
  1411. }\
  1412. \
  1413. static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
  1414. uint8_t full[SIZE*(SIZE+5)];\
  1415. uint8_t * const full_mid= full + SIZE*2;\
  1416. uint8_t half[SIZE*SIZE];\
  1417. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1418. put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
  1419. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
  1420. }\
  1421. \
/* Diagonal quarter positions: average the horizontal half-pel plane       */ \
/* (halfH) with the vertical one (halfV); mc31 takes the x=+1 column,      */ \
/* mc13 the y=+1 row, as the respective inputs.                            */ \
  1422. static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
  1423. uint8_t full[SIZE*(SIZE+5)];\
  1424. uint8_t * const full_mid= full + SIZE*2;\
  1425. uint8_t halfH[SIZE*SIZE];\
  1426. uint8_t halfV[SIZE*SIZE];\
  1427. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1428. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1429. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1430. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1431. }\
  1432. \
  1433. static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
  1434. uint8_t full[SIZE*(SIZE+5)];\
  1435. uint8_t * const full_mid= full + SIZE*2;\
  1436. uint8_t halfH[SIZE*SIZE];\
  1437. uint8_t halfV[SIZE*SIZE];\
  1438. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1439. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1440. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1441. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1442. }\
  1443. \
  1444. static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
  1445. uint8_t full[SIZE*(SIZE+5)];\
  1446. uint8_t * const full_mid= full + SIZE*2;\
  1447. uint8_t halfH[SIZE*SIZE];\
  1448. uint8_t halfV[SIZE*SIZE];\
  1449. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1450. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1451. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1452. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1453. }\
  1454. \
  1455. static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
  1456. uint8_t full[SIZE*(SIZE+5)];\
  1457. uint8_t * const full_mid= full + SIZE*2;\
  1458. uint8_t halfH[SIZE*SIZE];\
  1459. uint8_t halfV[SIZE*SIZE];\
  1460. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1461. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1462. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1463. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1464. }\
  1465. \
  1466. static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
  1467. int16_t tmp[SIZE*(SIZE+5)];\
  1468. OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
  1469. }\
  1470. \
  1471. static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
  1472. int16_t tmp[SIZE*(SIZE+5)];\
  1473. uint8_t halfH[SIZE*SIZE];\
  1474. uint8_t halfHV[SIZE*SIZE];\
  1475. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1476. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1477. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
  1478. }\
  1479. \
  1480. static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
  1481. int16_t tmp[SIZE*(SIZE+5)];\
  1482. uint8_t halfH[SIZE*SIZE];\
  1483. uint8_t halfHV[SIZE*SIZE];\
  1484. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1485. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1486. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
  1487. }\
  1488. \
  1489. static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
  1490. uint8_t full[SIZE*(SIZE+5)];\
  1491. uint8_t * const full_mid= full + SIZE*2;\
  1492. int16_t tmp[SIZE*(SIZE+5)];\
  1493. uint8_t halfV[SIZE*SIZE];\
  1494. uint8_t halfHV[SIZE*SIZE];\
  1495. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1496. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1497. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1498. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
  1499. }\
  1500. \
  1501. static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
  1502. uint8_t full[SIZE*(SIZE+5)];\
  1503. uint8_t * const full_mid= full + SIZE*2;\
  1504. int16_t tmp[SIZE*(SIZE+5)];\
  1505. uint8_t halfV[SIZE*SIZE];\
  1506. uint8_t halfHV[SIZE*SIZE];\
  1507. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1508. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1509. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1510. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
  1511. }\
/* Clip-and-store operations plugged into the H264_LOWPASS/H264_MC macro
 * expansions below.  The op_* forms round with +16 and shift by 5 (one
 * filter pass); the op2_* forms round with +512 and shift by 10 (two
 * cascaded passes).  The *_avg variants additionally round-average the
 * clipped value with the existing destination pixel.  'cm' is the
 * clipping table in scope at each expansion site. */
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
#define op2_put(a, b) a = cm[((b) + 512)>>10]
/* Instantiate the put/avg lowpass filters and the full set of H.264
 * quarter-pel MC functions for 4x4, 8x8 and 16x16 block sizes. */
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)
#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
#endif /* NOTE(review): closes a conditional opened before this chunk — confirm against full file */
  1530. static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
  1531. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1532. do{
  1533. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  1534. uint8_t *s = src;
  1535. src_1 = s[-1];
  1536. src0 = *s++;
  1537. src1 = *s++;
  1538. src2 = *s++;
  1539. dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
  1540. src3 = *s++;
  1541. dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
  1542. src4 = *s++;
  1543. dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
  1544. src5 = *s++;
  1545. dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
  1546. src6 = *s++;
  1547. dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
  1548. src7 = *s++;
  1549. dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
  1550. src8 = *s++;
  1551. dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
  1552. src9 = *s++;
  1553. dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
  1554. dst+=dstStride;
  1555. src+=srcStride;
  1556. }while(--h);
  1557. }
  1558. static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
  1559. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1560. do{
  1561. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  1562. uint8_t *s = src,*d = dst;
  1563. src_1 = *(s-srcStride);
  1564. src0 = *s; s+=srcStride;
  1565. src1 = *s; s+=srcStride;
  1566. src2 = *s; s+=srcStride;
  1567. *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
  1568. src3 = *s; s+=srcStride;
  1569. *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
  1570. src4 = *s; s+=srcStride;
  1571. *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
  1572. src5 = *s; s+=srcStride;
  1573. *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
  1574. src6 = *s; s+=srcStride;
  1575. *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
  1576. src7 = *s; s+=srcStride;
  1577. *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
  1578. src8 = *s; s+=srcStride;
  1579. *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
  1580. src9 = *s;
  1581. *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
  1582. src++;
  1583. dst++;
  1584. }while(--w);
  1585. }
/* Full-pel position (0,0): plain 8x8 block copy, no filtering. */
static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
    put_pixels8_c(dst, src, stride, 8);
}
  1589. static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
  1590. uint8_t half[64];
  1591. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1592. put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
  1593. }
/* Half-pel (1/2, 0): horizontal filter written straight to dst. */
static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}
  1597. static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
  1598. uint8_t half[64];
  1599. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1600. put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
  1601. }
/* Half-pel (0, 1/2): vertical filter written straight to dst. */
static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}
  1605. static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
  1606. uint8_t halfH[88];
  1607. uint8_t halfV[64];
  1608. uint8_t halfHV[64];
  1609. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1610. wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
  1611. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  1612. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  1613. }
  1614. static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
  1615. uint8_t halfH[88];
  1616. uint8_t halfV[64];
  1617. uint8_t halfHV[64];
  1618. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1619. wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
  1620. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  1621. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  1622. }
  1623. static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
  1624. uint8_t halfH[88];
  1625. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1626. wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
  1627. }