You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1666 lines
70KB

  1. /*
  2. * This is optimized for sh, which have post increment addressing (*p++).
  3. * Some CPU may be index (p[n]) faster than post increment (*p++).
  4. *
  5. * copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
  6. *
  7. * This library is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2 of the License, or (at your option) any later version.
  11. *
  12. * This library is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with this library; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #define LD(adr) *(uint32_t*)(adr)
  22. #define PIXOP2(OPNAME, OP) \
  23. /*static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  24. {\
  25. do {\
  26. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  27. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  28. src1+=src_stride1; \
  29. src2+=src_stride2; \
  30. dst+=dst_stride; \
  31. } while(--h); \
  32. }\
  33. \
  34. static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  35. {\
  36. do {\
  37. OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  38. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  39. src1+=src_stride1; \
  40. src2+=src_stride2; \
  41. dst+=dst_stride; \
  42. } while(--h); \
  43. }\
  44. \
  45. static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  46. {\
  47. do {\
  48. OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  49. src1+=src_stride1; \
  50. src2+=src_stride2; \
  51. dst+=dst_stride; \
  52. } while(--h); \
  53. }\
  54. \
  55. static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  56. {\
  57. do {\
  58. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  59. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  60. OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
  61. OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
  62. src1+=src_stride1; \
  63. src2+=src_stride2; \
  64. dst+=dst_stride; \
  65. } while(--h); \
  66. }\
  67. \
  68. static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  69. {\
  70. do {\
  71. OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
  72. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
  73. OP(LP(dst+8),rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
  74. OP(LP(dst+12),rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
  75. src1+=src_stride1; \
  76. src2+=src_stride2; \
  77. dst+=dst_stride; \
  78. } while(--h); \
  79. }*/\
  80. \
  81. static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  82. {\
  83. do {\
  84. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  85. src1+=src_stride1; \
  86. src2+=src_stride2; \
  87. dst+=dst_stride; \
  88. } while(--h); \
  89. }\
  90. \
  91. static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  92. {\
  93. do {\
  94. OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  95. src1+=src_stride1; \
  96. src2+=src_stride2; \
  97. dst+=dst_stride; \
  98. } while(--h); \
  99. }\
  100. \
  101. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  102. {\
  103. do {\
  104. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  105. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  106. OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
  107. OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
  108. src1+=src_stride1; \
  109. src2+=src_stride2; \
  110. dst+=dst_stride; \
  111. } while(--h); \
  112. }\
  113. \
  114. static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  115. {\
  116. do {\
  117. OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  118. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  119. OP(LP(dst+8),rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
  120. OP(LP(dst+12),rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
  121. src1+=src_stride1; \
  122. src2+=src_stride2; \
  123. dst+=dst_stride; \
  124. } while(--h); \
  125. }\
  126. \
  127. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  128. {\
  129. do { /* onlye src2 aligned */\
  130. OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  131. OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  132. src1+=src_stride1; \
  133. src2+=src_stride2; \
  134. dst+=dst_stride; \
  135. } while(--h); \
  136. }\
  137. \
  138. static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  139. {\
  140. do {\
  141. OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
  142. OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
  143. src1+=src_stride1; \
  144. src2+=src_stride2; \
  145. dst+=dst_stride; \
  146. } while(--h); \
  147. }\
  148. \
  149. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  150. {\
  151. do {\
  152. OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
  153. OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  154. src1+=src_stride1; \
  155. src2+=src_stride2; \
  156. dst+=dst_stride; \
  157. } while(--h); \
  158. }\
  159. \
  160. static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  161. {\
  162. do {\
  163. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  164. OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  165. src1+=src_stride1; \
  166. src2+=src_stride2; \
  167. dst+=dst_stride; \
  168. } while(--h); \
  169. }\
  170. \
  171. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  172. {\
  173. do {\
  174. OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
  175. OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  176. OP(LP(dst+8),no_rnd_avg32(LP(src1+8),LP(src2+8)) ); \
  177. OP(LP(dst+12),no_rnd_avg32(LP(src1+12),LP(src2+12)) ); \
  178. src1+=src_stride1; \
  179. src2+=src_stride2; \
  180. dst+=dst_stride; \
  181. } while(--h); \
  182. }\
  183. \
  184. static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  185. {\
  186. do {\
  187. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  188. OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  189. OP(LP(dst+8),rnd_avg32(LP(src1+8),LP(src2+8)) ); \
  190. OP(LP(dst+12),rnd_avg32(LP(src1+12),LP(src2+12)) ); \
  191. src1+=src_stride1; \
  192. src2+=src_stride2; \
  193. dst+=dst_stride; \
  194. } while(--h); \
  195. }\
  196. \
  197. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  198. { OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  199. \
  200. static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  201. { OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  202. \
  203. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  204. { OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  205. \
  206. static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  207. { OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  208. \
  209. static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  210. do { \
  211. uint32_t a0,a1,a2,a3; \
  212. UNPACK(a0,a1,LP(src1),LP(src2)); \
  213. UNPACK(a2,a3,LP(src3),LP(src4)); \
  214. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  215. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  216. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  217. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  218. src1+=src_stride1;\
  219. src2+=src_stride2;\
  220. src3+=src_stride3;\
  221. src4+=src_stride4;\
  222. dst+=dst_stride;\
  223. } while(--h); \
  224. } \
  225. \
  226. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  227. do { \
  228. uint32_t a0,a1,a2,a3; \
  229. UNPACK(a0,a1,LP(src1),LP(src2)); \
  230. UNPACK(a2,a3,LP(src3),LP(src4)); \
  231. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  232. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  233. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  234. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  235. src1+=src_stride1;\
  236. src2+=src_stride2;\
  237. src3+=src_stride3;\
  238. src4+=src_stride4;\
  239. dst+=dst_stride;\
  240. } while(--h); \
  241. } \
  242. \
  243. static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  244. do { \
  245. uint32_t a0,a1,a2,a3; /* src1 only not aligned */\
  246. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  247. UNPACK(a2,a3,LP(src3),LP(src4)); \
  248. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  249. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  250. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  251. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  252. src1+=src_stride1;\
  253. src2+=src_stride2;\
  254. src3+=src_stride3;\
  255. src4+=src_stride4;\
  256. dst+=dst_stride;\
  257. } while(--h); \
  258. } \
  259. \
  260. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  261. do { \
  262. uint32_t a0,a1,a2,a3; \
  263. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  264. UNPACK(a2,a3,LP(src3),LP(src4)); \
  265. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  266. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  267. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  268. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  269. src1+=src_stride1;\
  270. src2+=src_stride2;\
  271. src3+=src_stride3;\
  272. src4+=src_stride4;\
  273. dst+=dst_stride;\
  274. } while(--h); \
  275. } \
  276. \
  277. static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  278. do { \
  279. uint32_t a0,a1,a2,a3; \
  280. UNPACK(a0,a1,LP(src1),LP(src2)); \
  281. UNPACK(a2,a3,LP(src3),LP(src4)); \
  282. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  283. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  284. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  285. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  286. UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
  287. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  288. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  289. UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
  290. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  291. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  292. src1+=src_stride1;\
  293. src2+=src_stride2;\
  294. src3+=src_stride3;\
  295. src4+=src_stride4;\
  296. dst+=dst_stride;\
  297. } while(--h); \
  298. } \
  299. \
  300. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  301. do { \
  302. uint32_t a0,a1,a2,a3; \
  303. UNPACK(a0,a1,LP(src1),LP(src2)); \
  304. UNPACK(a2,a3,LP(src3),LP(src4)); \
  305. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  306. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  307. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  308. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  309. UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
  310. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  311. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  312. UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
  313. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  314. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  315. src1+=src_stride1;\
  316. src2+=src_stride2;\
  317. src3+=src_stride3;\
  318. src4+=src_stride4;\
  319. dst+=dst_stride;\
  320. } while(--h); \
  321. } \
  322. \
  323. static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  324. do { /* src1 is unaligned */\
  325. uint32_t a0,a1,a2,a3; \
  326. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  327. UNPACK(a2,a3,LP(src3),LP(src4)); \
  328. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  329. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  330. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  331. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  332. UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
  333. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  334. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  335. UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
  336. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  337. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  338. src1+=src_stride1;\
  339. src2+=src_stride2;\
  340. src3+=src_stride3;\
  341. src4+=src_stride4;\
  342. dst+=dst_stride;\
  343. } while(--h); \
  344. } \
  345. \
  346. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  347. do { \
  348. uint32_t a0,a1,a2,a3; \
  349. UNPACK(a0,a1,LD32(src1),LP(src2)); \
  350. UNPACK(a2,a3,LP(src3),LP(src4)); \
  351. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  352. UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
  353. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  354. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  355. UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
  356. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  357. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  358. UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
  359. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  360. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  361. src1+=src_stride1;\
  362. src2+=src_stride2;\
  363. src3+=src_stride3;\
  364. src4+=src_stride4;\
  365. dst+=dst_stride;\
  366. } while(--h); \
  367. } \
  368. \
/* Instantiate the PIXOP2 kernels twice: "avg" averages the new value into
 * dst (rounding 32-bit packed average), "put" plainly overwrites dst. */
#define op_avg(a, b) a = rnd_avg32(a,b)
#define op_put(a, b) a = b
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
/* Scalar rounding averages of 2 and 4 samples, used by the plain-C
 * helpers below. NOTE(review): arguments are deliberately unparenthesized
 * upstream-style; callers pass simple expressions only. */
#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
  377. static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
  378. {
  379. const int A=(16-x16)*(16-y16);
  380. const int B=( x16)*(16-y16);
  381. const int C=(16-x16)*( y16);
  382. const int D=( x16)*( y16);
  383. do {
  384. int t0,t1,t2,t3;
  385. uint8_t *s0 = src;
  386. uint8_t *s1 = src+stride;
  387. t0 = *s0++; t2 = *s1++;
  388. t1 = *s0++; t3 = *s1++;
  389. dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  390. t0 = *s0++; t2 = *s1++;
  391. dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  392. t1 = *s0++; t3 = *s1++;
  393. dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  394. t0 = *s0++; t2 = *s1++;
  395. dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  396. t1 = *s0++; t3 = *s1++;
  397. dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  398. t0 = *s0++; t2 = *s1++;
  399. dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  400. t1 = *s0++; t3 = *s1++;
  401. dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  402. t0 = *s0++; t2 = *s1++;
  403. dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  404. dst+= stride;
  405. src+= stride;
  406. }while(--h);
  407. }
/*
 * Affine global motion compensation for an 8-pixel-wide, h-row block.
 * (ox, oy) is the source position of the first pixel; it is advanced by
 * (dxx, dyx) per output column and (dxy, dyy) per output row.  Positions
 * are fixed point: (vx>>16) is the position in 1/s-pel units (s = 1<<shift),
 * whose low `shift` bits are the bilinear fraction.  `r` is the rounding
 * constant added before the final >>(shift*2).  Samples outside the
 * width x height source area are clamped to the border (edge emulation).
 * clip() is a project helper — presumably clamps to [min, max]; defined
 * elsewhere in this file/project.
 */
static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
    int y, vx, vy;
    const int s= 1<<shift;

    /* convert extents to the largest valid coordinate */
    width--;
    height--;

    for(y=0; y<h; y++){
        int x;

        vx= ox;
        vy= oy;
        for(x=0; x<8; x++){ //XXX FIXME optimize
            int src_x, src_y, frac_x, frac_y, index;

            src_x= vx>>16;
            src_y= vy>>16;
            /* low `shift` bits of the 1/s-pel position = bilinear fraction */
            frac_x= src_x&(s-1);
            frac_y= src_y&(s-1);
            src_x>>=shift;
            src_y>>=shift;

            /* unsigned compare also rejects negative coordinates */
            if((unsigned)src_x < width){
                if((unsigned)src_y < height){
                    /* fully inside: bilinear blend of the 2x2 neighbourhood */
                    index= src_x + src_y*stride;
                    dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
                    + src[index +1]* frac_x )*(s-frac_y)
                    + ( src[index+stride ]*(s-frac_x)
                    + src[index+stride+1]* frac_x )* frac_y
                    + r)>>(shift*2);
                }else{
                    /* clamped vertically: interpolate horizontally only */
                    index= src_x + clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
                    + src[index +1]* frac_x )*s
                    + r)>>(shift*2);
                }
            }else{
                if((unsigned)src_y < height){
                    /* clamped horizontally: interpolate vertically only */
                    index= clip(src_x, 0, width) + src_y*stride;
                    dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
                    + src[index+stride ]* frac_y )*s
                    + r)>>(shift*2);
                }else{
                    /* clamped both ways: nearest border pixel, no blend */
                    index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= src[index ];
                }
            }

            /* advance source position per output column */
            vx+= dxx;
            vy+= dyx;
        }
        /* advance row origin per output line */
        ox += dxy;
        oy += dyy;
    }
}
/*
 * H264_CHROMA_MC(OPNAME, OP) instantiates H.264 chroma motion compensation
 * for 2-, 4- and 8-pixel-wide blocks.  Each output pixel is the bilinear
 * blend A*a + B*b + C*c + D*d of its 2x2 source neighbourhood, where the
 * weights derive from the 1/8-pel offsets (x, y) and sum to 64.  OP does
 * the final rounding/shift and the store (see op_put/op_avg at the
 * expansion site below).
 *
 * Rows are walked with two running pointers (s0 = current row, s1 = next
 * row) and the t0..t3 temporaries alternate roles so every source sample
 * is loaded exactly once — post-increment loads are cheap on SH (see the
 * note at the top of this file).
 */
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}
/* Instantiate the chroma MC kernels.  `b` arrives as the weighted sum
 * scaled by 64, so (b + 32) >> 6 is the rounded pixel value; "avg"
 * additionally averages it with the existing dst pixel (rounding up). */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
  547. /* not yet optimized */
  548. static inline void copy_block4(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  549. {
  550. int i;
  551. for(i=0; i<h; i++)
  552. {
  553. ST32(dst , LD32(src ));
  554. dst+=dstStride;
  555. src+=srcStride;
  556. }
  557. }
  558. static inline void copy_block8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  559. {
  560. int i;
  561. for(i=0; i<h; i++)
  562. {
  563. ST32(dst , LD32(src ));
  564. ST32(dst+4 , LD32(src+4 ));
  565. dst+=dstStride;
  566. src+=srcStride;
  567. }
  568. }
  569. static inline void copy_block16(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  570. {
  571. int i;
  572. for(i=0; i<h; i++)
  573. {
  574. ST32(dst , LD32(src ));
  575. ST32(dst+4 , LD32(src+4 ));
  576. ST32(dst+8 , LD32(src+8 ));
  577. ST32(dst+12, LD32(src+12));
  578. dst+=dstStride;
  579. src+=srcStride;
  580. }
  581. }
  582. static inline void copy_block17(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  583. {
  584. int i;
  585. for(i=0; i<h; i++)
  586. {
  587. ST32(dst , LD32(src ));
  588. ST32(dst+4 , LD32(src+4 ));
  589. ST32(dst+8 , LD32(src+8 ));
  590. ST32(dst+12, LD32(src+12));
  591. dst[16]= src[16];
  592. dst+=dstStride;
  593. src+=srcStride;
  594. }
  595. }
  596. static inline void copy_block9(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
  597. {
  598. int i;
  599. for(i=0; i<h; i++)
  600. {
  601. ST32(dst , LD32(src ));
  602. ST32(dst+4 , LD32(src+4 ));
  603. dst[8]= src[8];
  604. dst+=dstStride;
  605. src+=srcStride;
  606. }
  607. }
  608. /* end not optimized */
  609. #define QPEL_MC(r, OPNAME, RND, OP) \
  610. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  611. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  612. do {\
  613. uint8_t *s = src; \
  614. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  615. src0= *s++;\
  616. src1= *s++;\
  617. src2= *s++;\
  618. src3= *s++;\
  619. src4= *s++;\
  620. OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
  621. src5= *s++;\
  622. OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
  623. src6= *s++;\
  624. OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
  625. src7= *s++;\
  626. OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
  627. src8= *s++;\
  628. OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
  629. OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
  630. OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
  631. OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  632. dst+=dstStride;\
  633. src+=srcStride;\
  634. }while(--h);\
  635. }\
  636. \
  637. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  638. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  639. int w=8;\
  640. do{\
  641. uint8_t *s = src, *d=dst;\
  642. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  643. src0 = *s; s+=srcStride; \
  644. src1 = *s; s+=srcStride; \
  645. src2 = *s; s+=srcStride; \
  646. src3 = *s; s+=srcStride; \
  647. src4 = *s; s+=srcStride; \
  648. OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
  649. src5 = *s; s+=srcStride; \
  650. OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
  651. src6 = *s; s+=srcStride; \
  652. OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
  653. src7 = *s; s+=srcStride; \
  654. OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
  655. src8 = *s; \
  656. OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
  657. OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
  658. OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
  659. OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  660. dst++;\
  661. src++;\
  662. }while(--w);\
  663. }\
  664. \
  665. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  666. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  667. do {\
  668. uint8_t *s = src;\
  669. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  670. int src9,src10,src11,src12,src13,src14,src15,src16;\
  671. src0= *s++;\
  672. src1= *s++;\
  673. src2= *s++;\
  674. src3= *s++;\
  675. src4= *s++;\
  676. OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
  677. src5= *s++;\
  678. OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
  679. src6= *s++;\
  680. OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
  681. src7= *s++;\
  682. OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
  683. src8= *s++;\
  684. OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
  685. src9= *s++;\
  686. OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
  687. src10= *s++;\
  688. OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
  689. src11= *s++;\
  690. OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
  691. src12= *s++;\
  692. OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
  693. src13= *s++;\
  694. OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
  695. src14= *s++;\
  696. OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
  697. src15= *s++;\
  698. OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
  699. src16= *s++;\
  700. OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
  701. OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
  702. OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
  703. OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  704. dst+=dstStride;\
  705. src+=srcStride;\
  706. }while(--h);\
  707. }\
  708. \
  709. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  710. uint8_t *cm = cropTbl + MAX_NEG_CROP;\
  711. int w=16;\
  712. do {\
  713. uint8_t *s = src, *d=dst;\
  714. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  715. int src9,src10,src11,src12,src13,src14,src15,src16;\
  716. src0 = *s; s+=srcStride; \
  717. src1 = *s; s+=srcStride; \
  718. src2 = *s; s+=srcStride; \
  719. src3 = *s; s+=srcStride; \
  720. src4 = *s; s+=srcStride; \
  721. OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
  722. src5 = *s; s+=srcStride; \
  723. OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
  724. src6 = *s; s+=srcStride; \
  725. OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
  726. src7 = *s; s+=srcStride; \
  727. OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
  728. src8 = *s; s+=srcStride; \
  729. OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
  730. src9 = *s; s+=srcStride; \
  731. OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
  732. src10 = *s; s+=srcStride; \
  733. OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
  734. src11 = *s; s+=srcStride; \
  735. OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
  736. src12 = *s; s+=srcStride; \
  737. OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
  738. src13 = *s; s+=srcStride; \
  739. OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
  740. src14 = *s; s+=srcStride; \
  741. OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
  742. src15 = *s; s+=srcStride; \
  743. OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
  744. src16 = *s; \
  745. OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
  746. OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
  747. OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
  748. OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  749. dst++;\
  750. src++;\
  751. }while(--w);\
  752. }\
  753. \
/* 8x8 quarter-pel MC, expanded by the enclosing QPEL_MC macro for each */\
/* OPNAME (e.g. put_/avg_) and RND (rounding) variant.  qpel8_mcXY_c    */\
/* produces the prediction at quarter-pel phase (X/4, Y/4).  "half"     */\
/* buffers hold an intermediate half-pel plane; "full" holds a 16x9     */\
/* edge-padded copy of the source made by copy_block9().                */\
static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    /* (0,0): integer position - plain 8x8 copy/average. */\
    OPNAME ## pixels8_c(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    /* (1/4,0): average source with its horizontal half-pel filtering. */\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    /* (1/2,0): horizontal half-pel straight from the lowpass filter. */\
    OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    /* (3/4,0): as mc10 but averaged with src shifted one pel right. */\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    /* (0,1/4): vertical filter needs 9 padded source rows. */\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    /* (0,1/2): vertical half-pel straight from the lowpass filter. */\
    copy_block9(full, src, 16, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
}\
\
static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    /* (0,3/4): as mc01 but averaged with the row below (full+16). */\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
}\
/* Diagonal 8x8 quarter-pel positions.  The ff_*_old_c variants build    */\
/* separate H, V and HV half-pel planes and blend four planes (l4);     */\
/* the current variants fold the vertical average into halfH first and  */\
/* then blend only two planes (l2).  Presumably the _old_ versions are  */\
/* kept for reference/bit-exactness comparison - confirm with callers.  */\
static void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    /* fold the source into halfH, then filter vertically. */\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    /* right neighbour (full+1) is unaligned, hence the _aligned1 blend. */\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    /* bottom phase: start one halfH row down (halfH+8). */\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned0(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
/* Half-pel-centred 8x8 positions: x or y equal to 1/2.  mc21/mc23 blend */\
/* the H plane with the HV plane; mc12/mc32 pre-blend the source into    */\
/* halfH and run the vertical filter directly into dst; mc22 is the      */\
/* pure HV half-pel case.                                                */\
static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    /* 9 rows of H filtering so the vertical pass has its margin. */\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    /* average source into halfH, then vertical-filter straight to dst. */\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    /* (1/2,1/2): H filter then V filter, no blending needed. */\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
/* 16x16 quarter-pel MC on horizontal/vertical axes - same scheme as the */\
/* 8x8 versions, with a 24x17 padded copy (copy_block17) and 256-byte    */\
/* half-pel planes.                                                      */\
static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    /* (0,0): integer position - direct 16x16 copy/average. */\
    OPNAME ## pixels16_c(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    /* (3/4,0): blend against src shifted one pel right. */\
    OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    /* vertical filter needs 17 padded source rows. */\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    copy_block17(full, src, 24, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
}\
\
static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    /* (0,3/4): blend against the row below (full+24). */\
    OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
}\
/* Diagonal 16x16 quarter-pel positions.  As with the 8x8 set, the       */\
/* ff_*_old_c variants blend four planes (l4) while the current ones     */\
/* pre-fold the source into halfH and blend two planes (l2).             */\
static void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    /* fold the source into halfH, then filter vertically. */\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    /* right neighbour (full+1) is unaligned, hence _aligned1. */\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    /* bottom phase: start one halfH row down (halfH+16). */\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned0(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
/* Half-pel-centred 16x16 positions, mirroring the 8x8 set.  The final  */\
/* "}" (no continuation) closes the QPEL_MC macro body.                 */\
static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    /* 17 rows of H filtering give the vertical pass its margin. */\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    /* average source into halfH, then vertical-filter straight to dst. */\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    /* (1/2,1/2): H filter then V filter, no blending needed. */\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}
/* Pixel-store ops used to instantiate QPEL_MC.  The filter output b is
 * a sum scaled by 32 (coefficients 20/-6/3/-1 over both taps); ">>5"
 * normalizes it and cm[] clips to 0..255 (presumably cropTbl +
 * MAX_NEG_CROP as in the H264 macros below - defined in the filter
 * bodies outside this view).  "+16" rounds to nearest; "+15" is the
 * no-rounding variant required by the no_rnd prediction modes. */
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
/* Instantiate put, put_no_rnd and avg variants of every qpel MC function. */
QPEL_MC(0, put_ , _ , op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_ , _ , op_avg)
//QPEL_MC(1, avg_no_rnd , _ , op_avg)
#undef op_avg
#undef op_avg_no_rnd
#undef op_put
#undef op_put_no_rnd
#if 1
#define H264_LOWPASS(OPNAME, OP, OP2) \
/* H.264 6-tap half-pel filter, taps (1,-5,20,20,-5,1), horizontal pass. */\
/* Each row is fully unrolled; w selects 4, 8 or 16 output columns.      */\
/* Reads src[-2..w+2] per row; OP normalizes/clips and stores.           */\
static inline void OPNAME ## h264_qpel_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    do {\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2;\
        srcB = *s++;\
        srcA = *s++;\
        src0 = *s++;\
        src1 = *s++;\
        src2 = *s++;\
        src3 = *s++;\
        OP(dst[0], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        src4 = *s++;\
        OP(dst[1], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        src5 = *s++;\
        OP(dst[2], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        src6 = *s++;\
        OP(dst[3], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        if (w>4) { /* unrolled tail: extend to 8 columns */ \
            int src7,src8,src9,src10; \
            src7 = *s++;\
            OP(dst[4], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
            src8 = *s++;\
            OP(dst[5], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
            src9 = *s++;\
            OP(dst[6], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
            src10 = *s++;\
            OP(dst[7], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
            if (w>8) { /* extend to 16 columns */ \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s++;\
                OP(dst[8] , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
                src12 = *s++;\
                OP(dst[9] , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
                src13 = *s++;\
                OP(dst[10], (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
                src14 = *s++;\
                OP(dst[11], (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
                src15 = *s++;\
                OP(dst[12], (src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
                src16 = *s++;\
                OP(dst[13], (src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
                src17 = *s++;\
                OP(dst[14], (src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
                src18 = *s++;\
                OP(dst[15], (src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
            } \
        } \
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
  1184. \
/* H.264 6-tap half-pel filter, vertical pass: one column per outer      */\
/* iteration, walking down with srcStride; h selects 4, 8 or 16 rows.    */\
/* Reads rows -2..h+2 of the column; OP normalizes/clips and stores.     */\
static inline void OPNAME ## h264_qpel_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    do{\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2*srcStride,*d=dst;\
        srcB = *s; s+=srcStride;\
        srcA = *s; s+=srcStride;\
        src0 = *s; s+=srcStride;\
        src1 = *s; s+=srcStride;\
        src2 = *s; s+=srcStride;\
        src3 = *s; s+=srcStride;\
        OP(*d, (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));d+=dstStride;\
        src4 = *s; s+=srcStride;\
        OP(*d, (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));d+=dstStride;\
        src5 = *s; s+=srcStride;\
        OP(*d, (src2+src3)*20 - (src1+src4)*5 + (src0+src5));d+=dstStride;\
        src6 = *s; s+=srcStride;\
        OP(*d, (src3+src4)*20 - (src2+src5)*5 + (src1+src6));d+=dstStride;\
        if (h>4) { /* unrolled tail: extend to 8 rows */ \
            int src7,src8,src9,src10; \
            src7 = *s; s+=srcStride;\
            OP(*d, (src4+src5)*20 - (src3+src6)*5 + (src2+src7));d+=dstStride;\
            src8 = *s; s+=srcStride;\
            OP(*d, (src5+src6)*20 - (src4+src7)*5 + (src3+src8));d+=dstStride;\
            src9 = *s; s+=srcStride;\
            OP(*d, (src6+src7)*20 - (src5+src8)*5 + (src4+src9));d+=dstStride;\
            src10 = *s; s+=srcStride;\
            OP(*d, (src7+src8)*20 - (src6+src9)*5 + (src5+src10));d+=dstStride;\
            if (h>8) { /* extend to 16 rows */ \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s; s+=srcStride;\
                OP(*d , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));d+=dstStride;\
                src12 = *s; s+=srcStride;\
                OP(*d , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));d+=dstStride;\
                src13 = *s; s+=srcStride;\
                OP(*d, (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));d+=dstStride;\
                src14 = *s; s+=srcStride;\
                OP(*d, (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));d+=dstStride;\
                src15 = *s; s+=srcStride;\
                OP(*d, (src12+src13)*20 - (src11+src14)*5 + (src10+src15));d+=dstStride;\
                src16 = *s; s+=srcStride;\
                OP(*d, (src13+src14)*20 - (src12+src15)*5 + (src11+src16));d+=dstStride;\
                src17 = *s; s+=srcStride;\
                OP(*d, (src14+src15)*20 - (src13+src16)*5 + (src12+src17));d+=dstStride;\
                src18 = *s; s+=srcStride;\
                OP(*d, (src15+src16)*20 - (src14+src17)*5 + (src13+src18));d+=dstStride;\
            } \
        } \
        dst++;\
        src++;\
    }while(--w);\
}\
  1237. \
/* H.264 centre (half,half) position: horizontal 6-tap pass into the     */\
/* int16_t tmp plane (h+5 rows, unclipped intermediates), then vertical  */\
/* 6-tap pass over tmp with OP2, which applies the wider normalization   */\
/* needed for the double-filtered values.                                */\
static inline void OPNAME ## h264_qpel_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride,int w,int h){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    int i;\
    /* start 2 rows above so tmp covers the vertical filter margin. */\
    src -= 2*srcStride;\
    i= h+5; \
    do {\
        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
        uint8_t *s = src-2;\
        srcB = *s++;\
        srcA = *s++;\
        src0 = *s++;\
        src1 = *s++;\
        src2 = *s++;\
        src3 = *s++;\
        tmp[0] = ((src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        src4 = *s++;\
        tmp[1] = ((src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        src5 = *s++;\
        tmp[2] = ((src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        src6 = *s++;\
        tmp[3] = ((src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        if (w>4) { /* unrolled tail: extend to 8 columns */ \
            int src7,src8,src9,src10; \
            src7 = *s++;\
            tmp[4] = ((src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
            src8 = *s++;\
            tmp[5] = ((src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
            src9 = *s++;\
            tmp[6] = ((src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
            src10 = *s++;\
            tmp[7] = ((src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
            if (w>8) { /* extend to 16 columns */ \
                int src11,src12,src13,src14,src15,src16,src17,src18; \
                src11 = *s++;\
                tmp[8] = ((src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
                src12 = *s++;\
                tmp[9] = ((src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
                src13 = *s++;\
                tmp[10] = ((src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
                src14 = *s++;\
                tmp[11] = ((src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
                src15 = *s++;\
                tmp[12] = ((src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
                src16 = *s++;\
                tmp[13] = ((src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
                src17 = *s++;\
                tmp[14] = ((src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
                src18 = *s++;\
                tmp[15] = ((src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
            } \
        } \
        tmp+=tmpStride;\
        src+=srcStride;\
    }while(--i);\
    /* rewind tmp to row 2 so s = tmp-2*tmpStride starts at row 0. */\
    tmp -= tmpStride*(h+5-2);\
    i = w; \
    do {\
        int tmpB,tmpA,tmp0,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6;\
        int16_t *s = tmp-2*tmpStride; \
        uint8_t *d=dst;\
        tmpB = *s; s+=tmpStride;\
        tmpA = *s; s+=tmpStride;\
        tmp0 = *s; s+=tmpStride;\
        tmp1 = *s; s+=tmpStride;\
        tmp2 = *s; s+=tmpStride;\
        tmp3 = *s; s+=tmpStride;\
        OP2(*d, (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));d+=dstStride;\
        tmp4 = *s; s+=tmpStride;\
        OP2(*d, (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));d+=dstStride;\
        tmp5 = *s; s+=tmpStride;\
        OP2(*d, (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));d+=dstStride;\
        tmp6 = *s; s+=tmpStride;\
        OP2(*d, (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));d+=dstStride;\
        if (h>4) { /* unrolled tail: extend to 8 rows */ \
            int tmp7,tmp8,tmp9,tmp10; \
            tmp7 = *s; s+=tmpStride;\
            OP2(*d, (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));d+=dstStride;\
            tmp8 = *s; s+=tmpStride;\
            OP2(*d, (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));d+=dstStride;\
            tmp9 = *s; s+=tmpStride;\
            OP2(*d, (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));d+=dstStride;\
            tmp10 = *s; s+=tmpStride;\
            OP2(*d, (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));d+=dstStride;\
            if (h>8) { /* extend to 16 rows */ \
                int tmp11,tmp12,tmp13,tmp14,tmp15,tmp16,tmp17,tmp18; \
                tmp11 = *s; s+=tmpStride;\
                OP2(*d , (tmp8 +tmp9 )*20 - (tmp7 +tmp10)*5 + (tmp6 +tmp11));d+=dstStride;\
                tmp12 = *s; s+=tmpStride;\
                OP2(*d , (tmp9 +tmp10)*20 - (tmp8 +tmp11)*5 + (tmp7 +tmp12));d+=dstStride;\
                tmp13 = *s; s+=tmpStride;\
                OP2(*d, (tmp10+tmp11)*20 - (tmp9 +tmp12)*5 + (tmp8 +tmp13));d+=dstStride;\
                tmp14 = *s; s+=tmpStride;\
                OP2(*d, (tmp11+tmp12)*20 - (tmp10+tmp13)*5 + (tmp9 +tmp14));d+=dstStride;\
                tmp15 = *s; s+=tmpStride;\
                OP2(*d, (tmp12+tmp13)*20 - (tmp11+tmp14)*5 + (tmp10+tmp15));d+=dstStride;\
                tmp16 = *s; s+=tmpStride;\
                OP2(*d, (tmp13+tmp14)*20 - (tmp12+tmp15)*5 + (tmp11+tmp16));d+=dstStride;\
                tmp17 = *s; s+=tmpStride;\
                OP2(*d, (tmp14+tmp15)*20 - (tmp13+tmp16)*5 + (tmp12+tmp17));d+=dstStride;\
                tmp18 = *s; s+=tmpStride;\
                OP2(*d, (tmp15+tmp16)*20 - (tmp14+tmp17)*5 + (tmp13+tmp18));d+=dstStride;\
            } \
        } \
        dst++;\
        tmp++;\
    }while(--i);\
}\
  1345. \
/* Fixed-size entry points: bind w/h of the generic lowpass kernels to   */\
/* the 4x4, 8x8 and 16x16 block sizes used by the H.264 MC tables.       */\
static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
\
static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,8,8); \
}\
/* NOTE(review): the closing "}" below carries a trailing backslash with */\
/* the next line being "#define H264_MC" - verify in the original file   */\
/* that the H264_LOWPASS macro actually terminates before that #define.  */\
static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,16,16); \
}\
  1374. #define H264_MC(OPNAME, SIZE) \
  1375. static void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
  1376. OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
  1377. }\
  1378. \
  1379. static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
  1380. uint8_t half[SIZE*SIZE];\
  1381. put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
  1382. OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src, half, stride, stride, SIZE, SIZE);\
  1383. }\
  1384. \
  1385. static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
  1386. OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
  1387. }\
  1388. \
  1389. static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
  1390. uint8_t half[SIZE*SIZE];\
  1391. put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
  1392. OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src+1, half, stride, stride, SIZE, SIZE);\
  1393. }\
  1394. \
  1395. static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
  1396. uint8_t full[SIZE*(SIZE+5)];\
  1397. uint8_t * const full_mid= full + SIZE*2;\
  1398. uint8_t half[SIZE*SIZE];\
  1399. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1400. put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
  1401. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
  1402. }\
  1403. \
  1404. static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
  1405. uint8_t full[SIZE*(SIZE+5)];\
  1406. uint8_t * const full_mid= full + SIZE*2;\
  1407. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1408. OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
  1409. }\
  1410. \
  1411. static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
  1412. uint8_t full[SIZE*(SIZE+5)];\
  1413. uint8_t * const full_mid= full + SIZE*2;\
  1414. uint8_t half[SIZE*SIZE];\
  1415. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1416. put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
  1417. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
  1418. }\
  1419. \
  1420. static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
  1421. uint8_t full[SIZE*(SIZE+5)];\
  1422. uint8_t * const full_mid= full + SIZE*2;\
  1423. uint8_t halfH[SIZE*SIZE];\
  1424. uint8_t halfV[SIZE*SIZE];\
  1425. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1426. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1427. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1428. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1429. }\
  1430. \
  1431. static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
  1432. uint8_t full[SIZE*(SIZE+5)];\
  1433. uint8_t * const full_mid= full + SIZE*2;\
  1434. uint8_t halfH[SIZE*SIZE];\
  1435. uint8_t halfV[SIZE*SIZE];\
  1436. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1437. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1438. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1439. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1440. }\
  1441. \
  1442. static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
  1443. uint8_t full[SIZE*(SIZE+5)];\
  1444. uint8_t * const full_mid= full + SIZE*2;\
  1445. uint8_t halfH[SIZE*SIZE];\
  1446. uint8_t halfV[SIZE*SIZE];\
  1447. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1448. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1449. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1450. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1451. }\
  1452. \
  1453. static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
  1454. uint8_t full[SIZE*(SIZE+5)];\
  1455. uint8_t * const full_mid= full + SIZE*2;\
  1456. uint8_t halfH[SIZE*SIZE];\
  1457. uint8_t halfV[SIZE*SIZE];\
  1458. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1459. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1460. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1461. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1462. }\
  1463. \
  1464. static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
  1465. int16_t tmp[SIZE*(SIZE+5)];\
  1466. OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
  1467. }\
  1468. \
  1469. static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
  1470. int16_t tmp[SIZE*(SIZE+5)];\
  1471. uint8_t halfH[SIZE*SIZE];\
  1472. uint8_t halfHV[SIZE*SIZE];\
  1473. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1474. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1475. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
  1476. }\
  1477. \
  1478. static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
  1479. int16_t tmp[SIZE*(SIZE+5)];\
  1480. uint8_t halfH[SIZE*SIZE];\
  1481. uint8_t halfHV[SIZE*SIZE];\
  1482. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1483. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1484. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
  1485. }\
  1486. \
  1487. static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
  1488. uint8_t full[SIZE*(SIZE+5)];\
  1489. uint8_t * const full_mid= full + SIZE*2;\
  1490. int16_t tmp[SIZE*(SIZE+5)];\
  1491. uint8_t halfV[SIZE*SIZE];\
  1492. uint8_t halfHV[SIZE*SIZE];\
  1493. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1494. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1495. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1496. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
  1497. }\
  1498. \
  1499. static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
  1500. uint8_t full[SIZE*(SIZE+5)];\
  1501. uint8_t * const full_mid= full + SIZE*2;\
  1502. int16_t tmp[SIZE*(SIZE+5)];\
  1503. uint8_t halfV[SIZE*SIZE];\
  1504. uint8_t halfHV[SIZE*SIZE];\
  1505. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1506. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1507. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1508. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
  1509. }\
  1510. #define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
  1511. //#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
  1512. #define op_put(a, b) a = cm[((b) + 16)>>5]
  1513. #define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
  1514. #define op2_put(a, b) a = cm[((b) + 512)>>10]
  1515. H264_LOWPASS(put_ , op_put, op2_put)
  1516. H264_LOWPASS(avg_ , op_avg, op2_avg)
  1517. H264_MC(put_, 4)
  1518. H264_MC(put_, 8)
  1519. H264_MC(put_, 16)
  1520. H264_MC(avg_, 4)
  1521. H264_MC(avg_, 8)
  1522. H264_MC(avg_, 16)
  1523. #undef op_avg
  1524. #undef op_put
  1525. #undef op2_avg
  1526. #undef op2_put
  1527. #endif
  1528. static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
  1529. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1530. do{
  1531. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  1532. uint8_t *s = src;
  1533. src_1 = s[-1];
  1534. src0 = *s++;
  1535. src1 = *s++;
  1536. src2 = *s++;
  1537. dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
  1538. src3 = *s++;
  1539. dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
  1540. src4 = *s++;
  1541. dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
  1542. src5 = *s++;
  1543. dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
  1544. src6 = *s++;
  1545. dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
  1546. src7 = *s++;
  1547. dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
  1548. src8 = *s++;
  1549. dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
  1550. src9 = *s++;
  1551. dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
  1552. dst+=dstStride;
  1553. src+=srcStride;
  1554. }while(--h);
  1555. }
  1556. static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
  1557. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1558. do{
  1559. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  1560. uint8_t *s = src,*d = dst;
  1561. src_1 = *(s-srcStride);
  1562. src0 = *s; s+=srcStride;
  1563. src1 = *s; s+=srcStride;
  1564. src2 = *s; s+=srcStride;
  1565. *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
  1566. src3 = *s; s+=srcStride;
  1567. *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
  1568. src4 = *s; s+=srcStride;
  1569. *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
  1570. src5 = *s; s+=srcStride;
  1571. *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
  1572. src6 = *s; s+=srcStride;
  1573. *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
  1574. src7 = *s; s+=srcStride;
  1575. *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
  1576. src8 = *s; s+=srcStride;
  1577. *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
  1578. src9 = *s;
  1579. *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
  1580. src++;
  1581. dst++;
  1582. }while(--w);
  1583. }
/* Full-pel position: no interpolation, plain 8x8 block copy. */
static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
    put_pixels8_c(dst, src, stride, 8);
}
  1587. static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
  1588. uint8_t half[64];
  1589. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1590. put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
  1591. }
/* Half-pel horizontal position: apply the horizontal (9,-1) filter
 * directly into dst, 8 rows. */
static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}
  1595. static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
  1596. uint8_t half[64];
  1597. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1598. put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
  1599. }
/* Half-pel vertical position: apply the vertical (9,-1) filter directly
 * into dst, 8 columns. */
static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}
  1603. static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
  1604. uint8_t halfH[88];
  1605. uint8_t halfV[64];
  1606. uint8_t halfHV[64];
  1607. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1608. wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
  1609. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  1610. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  1611. }
  1612. static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
  1613. uint8_t halfH[88];
  1614. uint8_t halfV[64];
  1615. uint8_t halfHV[64];
  1616. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1617. wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
  1618. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  1619. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  1620. }
  1621. static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
  1622. uint8_t halfH[88];
  1623. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1624. wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
  1625. }