You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1599 lines
69KB

  1. /*
  2. * This is optimized for sh, which have post increment addressing (*p++).
  3. * Some CPU may be index (p[n]) faster than post increment (*p++).
  4. *
  5. * copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. #define PIXOP2(OPNAME, OP) \
  24. /*static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  25. {\
  26. do {\
  27. OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
  28. OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),AV_RN32(src2+4)) ); \
  29. src1+=src_stride1; \
  30. src2+=src_stride2; \
  31. dst+=dst_stride; \
  32. } while(--h); \
  33. }\
  34. \
  35. static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  36. {\
  37. do {\
  38. OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
  39. OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),AV_RN32(src2+4)) ); \
  40. src1+=src_stride1; \
  41. src2+=src_stride2; \
  42. dst+=dst_stride; \
  43. } while(--h); \
  44. }\
  45. \
  46. static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  47. {\
  48. do {\
  49. OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
  50. src1+=src_stride1; \
  51. src2+=src_stride2; \
  52. dst+=dst_stride; \
  53. } while(--h); \
  54. }\
  55. \
  56. static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  57. {\
  58. do {\
  59. OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
  60. OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),AV_RN32(src2+4)) ); \
  61. OP(LP(dst+8),no_rnd_avg32(AV_RN32(src1+8),AV_RN32(src2+8)) ); \
  62. OP(LP(dst+12),no_rnd_avg32(AV_RN32(src1+12),AV_RN32(src2+12)) ); \
  63. src1+=src_stride1; \
  64. src2+=src_stride2; \
  65. dst+=dst_stride; \
  66. } while(--h); \
  67. }\
  68. \
  69. static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  70. {\
  71. do {\
  72. OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),AV_RN32(src2 )) ); \
  73. OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),AV_RN32(src2+4)) ); \
  74. OP(LP(dst+8),rnd_avg32(AV_RN32(src1+8),AV_RN32(src2+8)) ); \
  75. OP(LP(dst+12),rnd_avg32(AV_RN32(src1+12),AV_RN32(src2+12)) ); \
  76. src1+=src_stride1; \
  77. src2+=src_stride2; \
  78. dst+=dst_stride; \
  79. } while(--h); \
  80. }*/\
  81. \
  82. static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  83. {\
  84. do {\
  85. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  86. src1+=src_stride1; \
  87. src2+=src_stride2; \
  88. dst+=dst_stride; \
  89. } while(--h); \
  90. }\
  91. \
  92. static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  93. {\
  94. do {\
  95. OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
  96. src1+=src_stride1; \
  97. src2+=src_stride2; \
  98. dst+=dst_stride; \
  99. } while(--h); \
  100. }\
  101. \
  102. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  103. {\
  104. do {\
  105. OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
  106. OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LP(src2+4)) ); \
  107. OP(LP(dst+8),no_rnd_avg32(AV_RN32(src1+8),LP(src2+8)) ); \
  108. OP(LP(dst+12),no_rnd_avg32(AV_RN32(src1+12),LP(src2+12)) ); \
  109. src1+=src_stride1; \
  110. src2+=src_stride2; \
  111. dst+=dst_stride; \
  112. } while(--h); \
  113. }\
  114. \
  115. static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  116. {\
  117. do {\
  118. OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
  119. OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LP(src2+4)) ); \
  120. OP(LP(dst+8),rnd_avg32(AV_RN32(src1+8),LP(src2+8)) ); \
  121. OP(LP(dst+12),rnd_avg32(AV_RN32(src1+12),LP(src2+12)) ); \
  122. src1+=src_stride1; \
  123. src2+=src_stride2; \
  124. dst+=dst_stride; \
  125. } while(--h); \
  126. }\
  127. \
  128. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  129. {\
  130. do { /* onlye src2 aligned */\
  131. OP(LP(dst ),no_rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
  132. OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LP(src2+4)) ); \
  133. src1+=src_stride1; \
  134. src2+=src_stride2; \
  135. dst+=dst_stride; \
  136. } while(--h); \
  137. }\
  138. \
  139. static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  140. {\
  141. do {\
  142. OP(LP(dst ),rnd_avg32(AV_RN32(src1 ),LP(src2 )) ); \
  143. OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LP(src2+4)) ); \
  144. src1+=src_stride1; \
  145. src2+=src_stride2; \
  146. dst+=dst_stride; \
  147. } while(--h); \
  148. }\
  149. \
  150. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  151. {\
  152. do {\
  153. OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
  154. OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  155. src1+=src_stride1; \
  156. src2+=src_stride2; \
  157. dst+=dst_stride; \
  158. } while(--h); \
  159. }\
  160. \
  161. static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  162. {\
  163. do {\
  164. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  165. OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  166. src1+=src_stride1; \
  167. src2+=src_stride2; \
  168. dst+=dst_stride; \
  169. } while(--h); \
  170. }\
  171. \
  172. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  173. {\
  174. do {\
  175. OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
  176. OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  177. OP(LP(dst+8),no_rnd_avg32(LP(src1+8),LP(src2+8)) ); \
  178. OP(LP(dst+12),no_rnd_avg32(LP(src1+12),LP(src2+12)) ); \
  179. src1+=src_stride1; \
  180. src2+=src_stride2; \
  181. dst+=dst_stride; \
  182. } while(--h); \
  183. }\
  184. \
  185. static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  186. {\
  187. do {\
  188. OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
  189. OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
  190. OP(LP(dst+8),rnd_avg32(LP(src1+8),LP(src2+8)) ); \
  191. OP(LP(dst+12),rnd_avg32(LP(src1+12),LP(src2+12)) ); \
  192. src1+=src_stride1; \
  193. src2+=src_stride2; \
  194. dst+=dst_stride; \
  195. } while(--h); \
  196. }\
  197. \
  198. static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  199. { OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  200. \
  201. static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  202. { OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  203. \
  204. static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  205. { OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  206. \
  207. static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
  208. { OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
  209. \
  210. static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  211. do { \
  212. uint32_t a0,a1,a2,a3; \
  213. UNPACK(a0,a1,LP(src1),LP(src2)); \
  214. UNPACK(a2,a3,LP(src3),LP(src4)); \
  215. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  216. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  217. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  218. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  219. src1+=src_stride1;\
  220. src2+=src_stride2;\
  221. src3+=src_stride3;\
  222. src4+=src_stride4;\
  223. dst+=dst_stride;\
  224. } while(--h); \
  225. } \
  226. \
  227. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  228. do { \
  229. uint32_t a0,a1,a2,a3; \
  230. UNPACK(a0,a1,LP(src1),LP(src2)); \
  231. UNPACK(a2,a3,LP(src3),LP(src4)); \
  232. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  233. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  234. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  235. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  236. src1+=src_stride1;\
  237. src2+=src_stride2;\
  238. src3+=src_stride3;\
  239. src4+=src_stride4;\
  240. dst+=dst_stride;\
  241. } while(--h); \
  242. } \
  243. \
  244. static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  245. do { \
  246. uint32_t a0,a1,a2,a3; /* src1 only not aligned */\
  247. UNPACK(a0,a1,AV_RN32(src1),LP(src2)); \
  248. UNPACK(a2,a3,LP(src3),LP(src4)); \
  249. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  250. UNPACK(a0,a1,AV_RN32(src1+4),LP(src2+4)); \
  251. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  252. OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
  253. src1+=src_stride1;\
  254. src2+=src_stride2;\
  255. src3+=src_stride3;\
  256. src4+=src_stride4;\
  257. dst+=dst_stride;\
  258. } while(--h); \
  259. } \
  260. \
  261. static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  262. do { \
  263. uint32_t a0,a1,a2,a3; \
  264. UNPACK(a0,a1,AV_RN32(src1),LP(src2)); \
  265. UNPACK(a2,a3,LP(src3),LP(src4)); \
  266. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  267. UNPACK(a0,a1,AV_RN32(src1+4),LP(src2+4)); \
  268. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  269. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  270. src1+=src_stride1;\
  271. src2+=src_stride2;\
  272. src3+=src_stride3;\
  273. src4+=src_stride4;\
  274. dst+=dst_stride;\
  275. } while(--h); \
  276. } \
  277. \
  278. static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  279. do { \
  280. uint32_t a0,a1,a2,a3; \
  281. UNPACK(a0,a1,LP(src1),LP(src2)); \
  282. UNPACK(a2,a3,LP(src3),LP(src4)); \
  283. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  284. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  285. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  286. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  287. UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
  288. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  289. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  290. UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
  291. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  292. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  293. src1+=src_stride1;\
  294. src2+=src_stride2;\
  295. src3+=src_stride3;\
  296. src4+=src_stride4;\
  297. dst+=dst_stride;\
  298. } while(--h); \
  299. } \
  300. \
  301. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  302. do { \
  303. uint32_t a0,a1,a2,a3; \
  304. UNPACK(a0,a1,LP(src1),LP(src2)); \
  305. UNPACK(a2,a3,LP(src3),LP(src4)); \
  306. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  307. UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
  308. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  309. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  310. UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
  311. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  312. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  313. UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
  314. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  315. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  316. src1+=src_stride1;\
  317. src2+=src_stride2;\
  318. src3+=src_stride3;\
  319. src4+=src_stride4;\
  320. dst+=dst_stride;\
  321. } while(--h); \
  322. } \
  323. \
  324. static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  325. do { /* src1 is unaligned */\
  326. uint32_t a0,a1,a2,a3; \
  327. UNPACK(a0,a1,AV_RN32(src1),LP(src2)); \
  328. UNPACK(a2,a3,LP(src3),LP(src4)); \
  329. OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
  330. UNPACK(a0,a1,AV_RN32(src1+4),LP(src2+4)); \
  331. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  332. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  333. UNPACK(a0,a1,AV_RN32(src1+8),LP(src2+8)); \
  334. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  335. OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
  336. UNPACK(a0,a1,AV_RN32(src1+12),LP(src2+12)); \
  337. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  338. OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
  339. src1+=src_stride1;\
  340. src2+=src_stride2;\
  341. src3+=src_stride3;\
  342. src4+=src_stride4;\
  343. dst+=dst_stride;\
  344. } while(--h); \
  345. } \
  346. \
  347. static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  348. do { \
  349. uint32_t a0,a1,a2,a3; \
  350. UNPACK(a0,a1,AV_RN32(src1),LP(src2)); \
  351. UNPACK(a2,a3,LP(src3),LP(src4)); \
  352. OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
  353. UNPACK(a0,a1,AV_RN32(src1+4),LP(src2+4)); \
  354. UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
  355. OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
  356. UNPACK(a0,a1,AV_RN32(src1+8),LP(src2+8)); \
  357. UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
  358. OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
  359. UNPACK(a0,a1,AV_RN32(src1+12),LP(src2+12)); \
  360. UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
  361. OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
  362. src1+=src_stride1;\
  363. src2+=src_stride2;\
  364. src3+=src_stride3;\
  365. src4+=src_stride4;\
  366. dst+=dst_stride;\
  367. } while(--h); \
  368. } \
  369. \
/* Per-word store primitives plugged into PIXOP2: op_avg averages the new
 * word into the destination (with rounding), op_put overwrites it. */
#define op_avg(a, b) a = rnd_avg32(a,b)
#define op_put(a, b) a = b
/* Instantiate the "avg_*" and "put_*" helper families defined above. */
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
/* Scalar 2-tap and 4-tap rounding averages for the per-byte MC code below. */
#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
  378. static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
  379. {
  380. const int A=(16-x16)*(16-y16);
  381. const int B=( x16)*(16-y16);
  382. const int C=(16-x16)*( y16);
  383. const int D=( x16)*( y16);
  384. do {
  385. int t0,t1,t2,t3;
  386. uint8_t *s0 = src;
  387. uint8_t *s1 = src+stride;
  388. t0 = *s0++; t2 = *s1++;
  389. t1 = *s0++; t3 = *s1++;
  390. dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  391. t0 = *s0++; t2 = *s1++;
  392. dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  393. t1 = *s0++; t3 = *s1++;
  394. dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  395. t0 = *s0++; t2 = *s1++;
  396. dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  397. t1 = *s0++; t3 = *s1++;
  398. dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  399. t0 = *s0++; t2 = *s1++;
  400. dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  401. t1 = *s0++; t3 = *s1++;
  402. dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
  403. t0 = *s0++; t2 = *s1++;
  404. dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
  405. dst+= stride;
  406. src+= stride;
  407. }while(--h);
  408. }
/*
 * gmc_c: global motion compensation with a full affine motion model.
 * For each of the 8 pixels per row, the source position is advanced by
 * (dxx, dyx) per x step and (dxy, dyy) per y step, in 16.16 fixed point.
 * Each pixel is bilinearly interpolated with sub-pel precision s = 1<<shift;
 * r is the rounding constant added before the final >>(shift*2).
 * Source coordinates falling outside [0, width) x [0, height) are clamped
 * to the edge (the interpolation collapses to 1-D or to a plain copy there).
 * NOTE(review): frac_x/frac_y are taken from vx>>16 before the extra
 * >>shift — i.e. the fraction lives in the bits between 16 and 16+shift
 * of vx/vy; this matches the generic FFmpeg gmc implementation.
 */
static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
int y, vx, vy;
const int s= 1<<shift; /* number of sub-pel positions per pixel */
width--;  /* from here on: last valid x index */
height--; /* last valid y index */
for(y=0; y<h; y++){
int x;
vx= ox;
vy= oy;
for(x=0; x<8; x++){ //XXX FIXME optimize
int src_x, src_y, frac_x, frac_y, index;
src_x= vx>>16;
src_y= vy>>16;
frac_x= src_x&(s-1);
frac_y= src_y&(s-1);
src_x>>=shift;
src_y>>=shift;
/* the unsigned compare rejects negative coordinates as well */
if((unsigned)src_x < width){
if((unsigned)src_y < height){
/* fully inside: 2-D bilinear interpolation */
index= src_x + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*(s-frac_y)
+ ( src[index+stride ]*(s-frac_x)
+ src[index+stride+1]* frac_x )* frac_y
+ r)>>(shift*2);
}else{
/* clamped vertically: 1-D horizontal interpolation only */
index= src_x + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*s
+ r)>>(shift*2);
}
}else{
if((unsigned)src_y < height){
/* clamped horizontally: 1-D vertical interpolation only */
index= av_clip(src_x, 0, width) + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
+ src[index+stride ]* frac_y )*s
+ r)>>(shift*2);
}else{
/* clamped both ways: copy the corner pixel */
index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= src[index ];
}
}
vx+= dxx;
vy+= dyx;
}
/* advance the row start position by the per-y motion increments */
ox += dxy;
oy += dyy;
}
}
/*
 * H264_CHROMA_MC(OPNAME, OP) generates the 2-, 4- and 8-pixel-wide H.264
 * chroma bilinear interpolators. (x, y) is the 1/8-pel fractional offset
 * (each in 0..7); the four weights A..D sum to 64, and OP() is expected to
 * perform the "+32 >> 6" normalization when storing (see op_put/op_avg
 * defined right after the macro).
 *
 * The inner loop rotates the t0..t3 registers so each source pixel is
 * loaded exactly once: on odd output pixels the roles of (t0,t2) and
 * (t1,t3) are swapped, which is why the weight order alternates between
 * A*t0+B*t1+... and A*t1+B*t0+...
 */
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; /* row below, for the vertical taps */ \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
do {\
int t0,t1,t2,t3; \
uint8_t *s0 = src; \
uint8_t *s1 = src+stride; \
t0 = *s0++; t2 = *s1++; \
t1 = *s0++; t3 = *s1++; \
OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
t1 = *s0++; t3 = *s1++; \
OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
t0 = *s0++; t2 = *s1++; \
OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
dst+= stride;\
src+= stride;\
}while(--h);\
}
/* Store primitives for the chroma MC: b carries 6 fractional bits, so both
 * normalize with (+32)>>6; op_avg then averages with the existing pixel. */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
/* Instantiate put_h264_chroma_mc{2,4,8}_c and avg_h264_chroma_mc{2,4,8}_c. */
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
  548. #define QPEL_MC(r, OPNAME, RND, OP) \
  549. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  550. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
  551. do {\
  552. uint8_t *s = src; \
  553. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  554. src0= *s++;\
  555. src1= *s++;\
  556. src2= *s++;\
  557. src3= *s++;\
  558. src4= *s++;\
  559. OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
  560. src5= *s++;\
  561. OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
  562. src6= *s++;\
  563. OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
  564. src7= *s++;\
  565. OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
  566. src8= *s++;\
  567. OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
  568. OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
  569. OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
  570. OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  571. dst+=dstStride;\
  572. src+=srcStride;\
  573. }while(--h);\
  574. }\
  575. \
  576. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  577. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
  578. int w=8;\
  579. do{\
  580. uint8_t *s = src, *d=dst;\
  581. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  582. src0 = *s; s+=srcStride; \
  583. src1 = *s; s+=srcStride; \
  584. src2 = *s; s+=srcStride; \
  585. src3 = *s; s+=srcStride; \
  586. src4 = *s; s+=srcStride; \
  587. OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
  588. src5 = *s; s+=srcStride; \
  589. OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
  590. src6 = *s; s+=srcStride; \
  591. OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
  592. src7 = *s; s+=srcStride; \
  593. OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
  594. src8 = *s; \
  595. OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
  596. OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
  597. OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
  598. OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
  599. dst++;\
  600. src++;\
  601. }while(--w);\
  602. }\
  603. \
  604. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  605. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
  606. do {\
  607. uint8_t *s = src;\
  608. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  609. int src9,src10,src11,src12,src13,src14,src15,src16;\
  610. src0= *s++;\
  611. src1= *s++;\
  612. src2= *s++;\
  613. src3= *s++;\
  614. src4= *s++;\
  615. OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
  616. src5= *s++;\
  617. OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
  618. src6= *s++;\
  619. OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
  620. src7= *s++;\
  621. OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
  622. src8= *s++;\
  623. OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
  624. src9= *s++;\
  625. OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
  626. src10= *s++;\
  627. OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
  628. src11= *s++;\
  629. OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
  630. src12= *s++;\
  631. OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
  632. src13= *s++;\
  633. OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
  634. src14= *s++;\
  635. OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
  636. src15= *s++;\
  637. OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
  638. src16= *s++;\
  639. OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
  640. OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
  641. OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
  642. OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  643. dst+=dstStride;\
  644. src+=srcStride;\
  645. }while(--h);\
  646. }\
  647. \
  648. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  649. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
  650. int w=16;\
  651. do {\
  652. uint8_t *s = src, *d=dst;\
  653. int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
  654. int src9,src10,src11,src12,src13,src14,src15,src16;\
  655. src0 = *s; s+=srcStride; \
  656. src1 = *s; s+=srcStride; \
  657. src2 = *s; s+=srcStride; \
  658. src3 = *s; s+=srcStride; \
  659. src4 = *s; s+=srcStride; \
  660. OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
  661. src5 = *s; s+=srcStride; \
  662. OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
  663. src6 = *s; s+=srcStride; \
  664. OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
  665. src7 = *s; s+=srcStride; \
  666. OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
  667. src8 = *s; s+=srcStride; \
  668. OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
  669. src9 = *s; s+=srcStride; \
  670. OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
  671. src10 = *s; s+=srcStride; \
  672. OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
  673. src11 = *s; s+=srcStride; \
  674. OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
  675. src12 = *s; s+=srcStride; \
  676. OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
  677. src13 = *s; s+=srcStride; \
  678. OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
  679. src14 = *s; s+=srcStride; \
  680. OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
  681. src15 = *s; s+=srcStride; \
  682. OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
  683. src16 = *s; \
  684. OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
  685. OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
  686. OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
  687. OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
  688. dst++;\
  689. src++;\
  690. }while(--w);\
  691. }\
  692. \
/* MPEG-4 quarter-pel motion compensation for 8x8 blocks.  These are macro \
 * body lines: OPNAME/RND/OP are pasted in by the QPEL_MC() invocations \
 * further down, and mcXY in each name encodes the quarter-pel phase \
 * (X = horizontal, Y = vertical, in quarter-pel units). */ \
  693. static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
  694. OPNAME ## pixels8_c(dst, src, stride, 8); /* integer-pel: plain copy/avg */\
  695. }\
  696. \
  697. static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
  698. uint8_t half[64]; /* 8x8 horizontally filtered halfpel scratch */\
  699. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
  700. OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
  701. }\
  702. \
  703. static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
  704. OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
  705. }\
  706. \
  707. static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
  708. uint8_t half[64];\
  709. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
  710. OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
  711. }\
  712. \
  713. static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
  714. uint8_t full[16*9]; /* 9 source rows at stride 16: one extra row for the vertical filter */\
  715. uint8_t half[64];\
  716. copy_block9(full, src, 16, stride, 9);\
  717. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
  718. OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
  719. }\
  720. \
  721. static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
  722. uint8_t full[16*9];\
  723. copy_block9(full, src, 16, stride, 9);\
  724. OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
  725. }\
  726. \
  727. static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
  728. uint8_t full[16*9];\
  729. uint8_t half[64];\
  730. copy_block9(full, src, 16, stride, 9);\
  731. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
  732. OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
  733. }\
  734. static void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){ /* "_old_" 4-plane reference version; presumably unused -- confirm */\
  735. uint8_t full[16*9];\
  736. uint8_t halfH[72];\
  737. uint8_t halfV[64];\
  738. uint8_t halfHV[64];\
  739. copy_block9(full, src, 16, stride, 9);\
  740. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  741. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
  742. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  743. OPNAME ## pixels8_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
  744. }\
  745. static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){ /* diagonal: pre-average halfH with the source, then filter vertically */\
  746. uint8_t full[16*9];\
  747. uint8_t halfH[72]; /* 8x9: one extra filtered row for the vertical pass */\
  748. uint8_t halfHV[64];\
  749. copy_block9(full, src, 16, stride, 9);\
  750. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  751. put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
  752. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  753. OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
  754. }\
  755. static void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
  756. uint8_t full[16*9];\
  757. uint8_t halfH[72];\
  758. uint8_t halfV[64];\
  759. uint8_t halfHV[64];\
  760. copy_block9(full, src, 16, stride, 9);\
  761. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  762. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
  763. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  764. OPNAME ## pixels8_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
  765. }\
  766. static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
  767. uint8_t full[16*9];\
  768. uint8_t halfH[72];\
  769. uint8_t halfHV[64];\
  770. copy_block9(full, src, 16, stride, 9);\
  771. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  772. put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9); /* +1: right-hand integer column */\
  773. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  774. OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
  775. }\
  776. static void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
  777. uint8_t full[16*9];\
  778. uint8_t halfH[72];\
  779. uint8_t halfV[64];\
  780. uint8_t halfHV[64];\
  781. copy_block9(full, src, 16, stride, 9);\
  782. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  783. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
  784. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  785. OPNAME ## pixels8_l4_aligned(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
  786. }\
  787. static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
  788. uint8_t full[16*9];\
  789. uint8_t halfH[72];\
  790. uint8_t halfHV[64];\
  791. copy_block9(full, src, 16, stride, 9);\
  792. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  793. put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
  794. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  795. OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8); /* +8: shifted one row down */\
  796. }\
  797. static void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
  798. uint8_t full[16*9];\
  799. uint8_t halfH[72];\
  800. uint8_t halfV[64];\
  801. uint8_t halfHV[64];\
  802. copy_block9(full, src, 16, stride, 9);\
  803. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
  804. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
  805. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  806. OPNAME ## pixels8_l4_aligned0(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
  807. }\
  808. static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
  809. uint8_t full[16*9];\
  810. uint8_t halfH[72];\
  811. uint8_t halfHV[64];\
  812. copy_block9(full, src, 16, stride, 9);\
  813. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  814. put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
  815. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  816. OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
  817. }\
  818. static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){ /* center column cases need no 'full' copy */\
  819. uint8_t halfH[72];\
  820. uint8_t halfHV[64];\
  821. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
  822. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  823. OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
  824. }\
  825. static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
  826. uint8_t halfH[72];\
  827. uint8_t halfHV[64];\
  828. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
  829. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  830. OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
  831. }\
  832. static void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
  833. uint8_t full[16*9];\
  834. uint8_t halfH[72];\
  835. uint8_t halfV[64];\
  836. uint8_t halfHV[64];\
  837. copy_block9(full, src, 16, stride, 9);\
  838. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  839. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
  840. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  841. OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
  842. }\
  843. static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
  844. uint8_t full[16*9];\
  845. uint8_t halfH[72];\
  846. copy_block9(full, src, 16, stride, 9);\
  847. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  848. put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
  849. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); /* final pass writes dst directly */\
  850. }\
  851. static void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
  852. uint8_t full[16*9];\
  853. uint8_t halfH[72];\
  854. uint8_t halfV[64];\
  855. uint8_t halfHV[64];\
  856. copy_block9(full, src, 16, stride, 9);\
  857. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  858. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
  859. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
  860. OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
  861. }\
  862. static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
  863. uint8_t full[16*9];\
  864. uint8_t halfH[72];\
  865. copy_block9(full, src, 16, stride, 9);\
  866. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
  867. put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
  868. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
  869. }\
  870. static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){ /* exact center: h then v filter, no averaging */\
  871. uint8_t halfH[72];\
  872. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
  873. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
  874. }\
/* Same structure as the 8x8 set above, for 16x16 blocks; buffers scale \
 * accordingly (full = 24x17 padded copy, halfpel planes = 16x16). */ \
  875. static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
  876. OPNAME ## pixels16_c(dst, src, stride, 16); /* integer-pel: plain copy/avg */\
  877. }\
  878. \
  879. static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
  880. uint8_t half[256]; /* 16x16 h-filtered halfpel scratch */\
  881. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
  882. OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
  883. }\
  884. \
  885. static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
  886. OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
  887. }\
  888. \
  889. static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
  890. uint8_t half[256];\
  891. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
  892. OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
  893. }\
  894. \
  895. static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
  896. uint8_t full[24*17]; /* 17 source rows at stride 24 for the vertical filter */\
  897. uint8_t half[256];\
  898. copy_block17(full, src, 24, stride, 17);\
  899. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
  900. OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
  901. }\
  902. \
  903. static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
  904. uint8_t full[24*17];\
  905. copy_block17(full, src, 24, stride, 17);\
  906. OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
  907. }\
  908. \
  909. static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
  910. uint8_t full[24*17];\
  911. uint8_t half[256];\
  912. copy_block17(full, src, 24, stride, 17);\
  913. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
  914. OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
  915. }\
  916. static void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){ /* "_old_" 4-plane reference version; presumably unused -- confirm */\
  917. uint8_t full[24*17];\
  918. uint8_t halfH[272];\
  919. uint8_t halfV[256];\
  920. uint8_t halfHV[256];\
  921. copy_block17(full, src, 24, stride, 17);\
  922. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  923. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
  924. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  925. OPNAME ## pixels16_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
  926. }\
  927. static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
  928. uint8_t full[24*17];\
  929. uint8_t halfH[272]; /* 16x17: one extra filtered row for the vertical pass */\
  930. uint8_t halfHV[256];\
  931. copy_block17(full, src, 24, stride, 17);\
  932. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  933. put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
  934. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  935. OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
  936. }\
  937. static void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
  938. uint8_t full[24*17];\
  939. uint8_t halfH[272];\
  940. uint8_t halfV[256];\
  941. uint8_t halfHV[256];\
  942. copy_block17(full, src, 24, stride, 17);\
  943. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  944. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
  945. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  946. OPNAME ## pixels16_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
  947. }\
  948. static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
  949. uint8_t full[24*17];\
  950. uint8_t halfH[272];\
  951. uint8_t halfHV[256];\
  952. copy_block17(full, src, 24, stride, 17);\
  953. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  954. put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17); /* +1: right-hand integer column */\
  955. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  956. OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
  957. }\
  958. static void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
  959. uint8_t full[24*17];\
  960. uint8_t halfH[272];\
  961. uint8_t halfV[256];\
  962. uint8_t halfHV[256];\
  963. copy_block17(full, src, 24, stride, 17);\
  964. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  965. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
  966. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  967. OPNAME ## pixels16_l4_aligned(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
  968. }\
  969. static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
  970. uint8_t full[24*17];\
  971. uint8_t halfH[272];\
  972. uint8_t halfHV[256];\
  973. copy_block17(full, src, 24, stride, 17);\
  974. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  975. put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
  976. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  977. OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16); /* +16: shifted one row down */\
  978. }\
  979. static void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
  980. uint8_t full[24*17];\
  981. uint8_t halfH[272];\
  982. uint8_t halfV[256];\
  983. uint8_t halfHV[256];\
  984. copy_block17(full, src, 24, stride, 17);\
  985. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
  986. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
  987. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  988. OPNAME ## pixels16_l4_aligned0(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
  989. }\
  990. static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
  991. uint8_t full[24*17];\
  992. uint8_t halfH[272];\
  993. uint8_t halfHV[256];\
  994. copy_block17(full, src, 24, stride, 17);\
  995. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  996. put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
  997. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  998. OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
  999. }\
  1000. static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride){ /* center column cases need no 'full' copy */\
  1001. uint8_t halfH[272];\
  1002. uint8_t halfHV[256];\
  1003. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
  1004. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1005. OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
  1006. }\
  1007. static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
  1008. uint8_t halfH[272];\
  1009. uint8_t halfHV[256];\
  1010. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
  1011. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1012. OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
  1013. }\
  1014. static void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
  1015. uint8_t full[24*17];\
  1016. uint8_t halfH[272];\
  1017. uint8_t halfV[256];\
  1018. uint8_t halfHV[256];\
  1019. copy_block17(full, src, 24, stride, 17);\
  1020. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1021. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
  1022. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1023. OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
  1024. }\
  1025. static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
  1026. uint8_t full[24*17];\
  1027. uint8_t halfH[272];\
  1028. copy_block17(full, src, 24, stride, 17);\
  1029. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1030. put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
  1031. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); /* final pass writes dst directly */\
  1032. }\
  1033. static void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
  1034. uint8_t full[24*17];\
  1035. uint8_t halfH[272];\
  1036. uint8_t halfV[256];\
  1037. uint8_t halfHV[256];\
  1038. copy_block17(full, src, 24, stride, 17);\
  1039. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1040. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
  1041. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
  1042. OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
  1043. }\
  1044. static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
  1045. uint8_t full[24*17];\
  1046. uint8_t halfH[272];\
  1047. copy_block17(full, src, 24, stride, 17);\
  1048. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
  1049. put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
  1050. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
  1051. }\
  1052. static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride){ /* exact center: h then v filter, no averaging */\
  1053. uint8_t halfH[272];\
  1054. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
  1055. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
  1056. }
/* Pixel store ops pasted into the lowpass filters above: 'b' is the raw
 * 6-tap filter sum; cm[] (ff_cropTbl + MAX_NEG_CROP) clamps (b+round)>>5
 * to 0..255.  "+16" rounds to nearest, "+15" is the no-rounding variant;
 * op_avg additionally averages with the existing destination pixel. */
  1057. #define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
  1058. #define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
  1059. #define op_put(a, b) a = cm[((b) + 16)>>5]
  1060. #define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
/* Instantiate the put_, put_no_rnd_ and avg_ families of qpel functions. */
  1061. QPEL_MC(0, put_ , _ , op_put)
  1062. QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
  1063. QPEL_MC(0, avg_ , _ , op_avg)
  1064. //QPEL_MC(1, avg_no_rnd , _ , op_avg)
  1065. #undef op_avg
  1066. #undef op_avg_no_rnd
  1067. #undef op_put
  1068. #undef op_put_no_rnd
  1069. #if 1
/* H.264 quarter-pel 6-tap (1,-5,20,20,-5,1) lowpass filters, generated per
 * OPNAME by H264_LOWPASS.  The horizontal pass below keeps a 6-value sliding
 * window in locals and is manually unrolled; the w>4 / w>8 branches extend
 * the row to 8 and 16 output pixels for the larger block sizes. */
  1070. #define H264_LOWPASS(OPNAME, OP, OP2) \
  1071. static inline void OPNAME ## h264_qpel_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
  1072. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; /* clamp table used by OP */\
  1073. do {\
  1074. int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
  1075. uint8_t *s = src-2; /* filter taps reach 2 pixels left of the output */\
  1076. srcB = *s++;\
  1077. srcA = *s++;\
  1078. src0 = *s++;\
  1079. src1 = *s++;\
  1080. src2 = *s++;\
  1081. src3 = *s++;\
  1082. OP(dst[0], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
  1083. src4 = *s++;\
  1084. OP(dst[1], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
  1085. src5 = *s++;\
  1086. OP(dst[2], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
  1087. src6 = *s++;\
  1088. OP(dst[3], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
  1089. if (w>4) { /* partial unroll: wider rows only for 8/16-wide blocks */ \
  1090. int src7,src8,src9,src10; \
  1091. src7 = *s++;\
  1092. OP(dst[4], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
  1093. src8 = *s++;\
  1094. OP(dst[5], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
  1095. src9 = *s++;\
  1096. OP(dst[6], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
  1097. src10 = *s++;\
  1098. OP(dst[7], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
  1099. if (w>8) { \
  1100. int src11,src12,src13,src14,src15,src16,src17,src18; \
  1101. src11 = *s++;\
  1102. OP(dst[8] , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
  1103. src12 = *s++;\
  1104. OP(dst[9] , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
  1105. src13 = *s++;\
  1106. OP(dst[10], (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
  1107. src14 = *s++;\
  1108. OP(dst[11], (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
  1109. src15 = *s++;\
  1110. OP(dst[12], (src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
  1111. src16 = *s++;\
  1112. OP(dst[13], (src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
  1113. src17 = *s++;\
  1114. OP(dst[14], (src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
  1115. src18 = *s++;\
  1116. OP(dst[15], (src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
  1117. } \
  1118. } \
  1119. dst+=dstStride;\
  1120. src+=srcStride;\
  1121. }while(--h);\
  1122. }\
  1123. \
/* Vertical 6-tap lowpass: same filter, walking one column per outer loop. */ \
  1124. static inline void OPNAME ## h264_qpel_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
  1125. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; /* clamp table used by OP */\
  1126. do{\
  1127. int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
  1128. uint8_t *s = src-2*srcStride,*d=dst; /* taps reach 2 rows above the output */\
  1129. srcB = *s; s+=srcStride;\
  1130. srcA = *s; s+=srcStride;\
  1131. src0 = *s; s+=srcStride;\
  1132. src1 = *s; s+=srcStride;\
  1133. src2 = *s; s+=srcStride;\
  1134. src3 = *s; s+=srcStride;\
  1135. OP(*d, (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));d+=dstStride;\
  1136. src4 = *s; s+=srcStride;\
  1137. OP(*d, (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));d+=dstStride;\
  1138. src5 = *s; s+=srcStride;\
  1139. OP(*d, (src2+src3)*20 - (src1+src4)*5 + (src0+src5));d+=dstStride;\
  1140. src6 = *s; s+=srcStride;\
  1141. OP(*d, (src3+src4)*20 - (src2+src5)*5 + (src1+src6));d+=dstStride;\
  1142. if (h>4) { /* partial unroll: taller columns only for 8/16-high blocks */ \
  1143. int src7,src8,src9,src10; \
  1144. src7 = *s; s+=srcStride;\
  1145. OP(*d, (src4+src5)*20 - (src3+src6)*5 + (src2+src7));d+=dstStride;\
  1146. src8 = *s; s+=srcStride;\
  1147. OP(*d, (src5+src6)*20 - (src4+src7)*5 + (src3+src8));d+=dstStride;\
  1148. src9 = *s; s+=srcStride;\
  1149. OP(*d, (src6+src7)*20 - (src5+src8)*5 + (src4+src9));d+=dstStride;\
  1150. src10 = *s; s+=srcStride;\
  1151. OP(*d, (src7+src8)*20 - (src6+src9)*5 + (src5+src10));d+=dstStride;\
  1152. if (h>8) { \
  1153. int src11,src12,src13,src14,src15,src16,src17,src18; \
  1154. src11 = *s; s+=srcStride;\
  1155. OP(*d , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));d+=dstStride;\
  1156. src12 = *s; s+=srcStride;\
  1157. OP(*d , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));d+=dstStride;\
  1158. src13 = *s; s+=srcStride;\
  1159. OP(*d, (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));d+=dstStride;\
  1160. src14 = *s; s+=srcStride;\
  1161. OP(*d, (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));d+=dstStride;\
  1162. src15 = *s; s+=srcStride;\
  1163. OP(*d, (src12+src13)*20 - (src11+src14)*5 + (src10+src15));d+=dstStride;\
  1164. src16 = *s; s+=srcStride;\
  1165. OP(*d, (src13+src14)*20 - (src12+src15)*5 + (src11+src16));d+=dstStride;\
  1166. src17 = *s; s+=srcStride;\
  1167. OP(*d, (src14+src15)*20 - (src13+src16)*5 + (src12+src17));d+=dstStride;\
  1168. src18 = *s; s+=srcStride;\
  1169. OP(*d, (src15+src16)*20 - (src14+src17)*5 + (src13+src18));d+=dstStride;\
  1170. } \
  1171. } \
  1172. dst++;\
  1173. src++;\
  1174. }while(--w);\
  1175. }\
  1176. \
/* Separable 2-D lowpass: pass 1 filters h+5 rows horizontally into the \
 * int16 tmp[] plane (extra precision, no clamping); pass 2 filters tmp \
 * vertically with OP2, which clamps the wider intermediate back to 8 bits. */ \
  1177. static inline void OPNAME ## h264_qpel_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride,int w,int h){\
  1178. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
  1179. int i;\
  1180. src -= 2*srcStride; /* start 2 rows up so pass 2 has its top taps */\
  1181. i= h+5; \
  1182. do {\
  1183. int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
  1184. uint8_t *s = src-2;\
  1185. srcB = *s++;\
  1186. srcA = *s++;\
  1187. src0 = *s++;\
  1188. src1 = *s++;\
  1189. src2 = *s++;\
  1190. src3 = *s++;\
  1191. tmp[0] = ((src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
  1192. src4 = *s++;\
  1193. tmp[1] = ((src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
  1194. src5 = *s++;\
  1195. tmp[2] = ((src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
  1196. src6 = *s++;\
  1197. tmp[3] = ((src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
  1198. if (w>4) { /* partial unroll: wider rows only for 8/16-wide blocks */ \
  1199. int src7,src8,src9,src10; \
  1200. src7 = *s++;\
  1201. tmp[4] = ((src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
  1202. src8 = *s++;\
  1203. tmp[5] = ((src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
  1204. src9 = *s++;\
  1205. tmp[6] = ((src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
  1206. src10 = *s++;\
  1207. tmp[7] = ((src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
  1208. if (w>8) { \
  1209. int src11,src12,src13,src14,src15,src16,src17,src18; \
  1210. src11 = *s++;\
  1211. tmp[8] = ((src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
  1212. src12 = *s++;\
  1213. tmp[9] = ((src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
  1214. src13 = *s++;\
  1215. tmp[10] = ((src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
  1216. src14 = *s++;\
  1217. tmp[11] = ((src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
  1218. src15 = *s++;\
  1219. tmp[12] = ((src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
  1220. src16 = *s++;\
  1221. tmp[13] = ((src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
  1222. src17 = *s++;\
  1223. tmp[14] = ((src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
  1224. src18 = *s++;\
  1225. tmp[15] = ((src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
  1226. } \
  1227. } \
  1228. tmp+=tmpStride;\
  1229. src+=srcStride;\
  1230. }while(--i);\
  1231. tmp -= tmpStride*(h+5-2); /* rewind so tmp-2*tmpStride below is row 0 of pass 1 */\
  1232. i = w; \
  1233. do {\
  1234. int tmpB,tmpA,tmp0,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6;\
  1235. int16_t *s = tmp-2*tmpStride; \
  1236. uint8_t *d=dst;\
  1237. tmpB = *s; s+=tmpStride;\
  1238. tmpA = *s; s+=tmpStride;\
  1239. tmp0 = *s; s+=tmpStride;\
  1240. tmp1 = *s; s+=tmpStride;\
  1241. tmp2 = *s; s+=tmpStride;\
  1242. tmp3 = *s; s+=tmpStride;\
  1243. OP2(*d, (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));d+=dstStride;\
  1244. tmp4 = *s; s+=tmpStride;\
  1245. OP2(*d, (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));d+=dstStride;\
  1246. tmp5 = *s; s+=tmpStride;\
  1247. OP2(*d, (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));d+=dstStride;\
  1248. tmp6 = *s; s+=tmpStride;\
  1249. OP2(*d, (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));d+=dstStride;\
  1250. if (h>4) { \
  1251. int tmp7,tmp8,tmp9,tmp10; \
  1252. tmp7 = *s; s+=tmpStride;\
  1253. OP2(*d, (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));d+=dstStride;\
  1254. tmp8 = *s; s+=tmpStride;\
  1255. OP2(*d, (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));d+=dstStride;\
  1256. tmp9 = *s; s+=tmpStride;\
  1257. OP2(*d, (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));d+=dstStride;\
  1258. tmp10 = *s; s+=tmpStride;\
  1259. OP2(*d, (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));d+=dstStride;\
  1260. if (h>8) { \
  1261. int tmp11,tmp12,tmp13,tmp14,tmp15,tmp16,tmp17,tmp18; \
  1262. tmp11 = *s; s+=tmpStride;\
  1263. OP2(*d , (tmp8 +tmp9 )*20 - (tmp7 +tmp10)*5 + (tmp6 +tmp11));d+=dstStride;\
  1264. tmp12 = *s; s+=tmpStride;\
  1265. OP2(*d , (tmp9 +tmp10)*20 - (tmp8 +tmp11)*5 + (tmp7 +tmp12));d+=dstStride;\
  1266. tmp13 = *s; s+=tmpStride;\
  1267. OP2(*d, (tmp10+tmp11)*20 - (tmp9 +tmp12)*5 + (tmp8 +tmp13));d+=dstStride;\
  1268. tmp14 = *s; s+=tmpStride;\
  1269. OP2(*d, (tmp11+tmp12)*20 - (tmp10+tmp13)*5 + (tmp9 +tmp14));d+=dstStride;\
  1270. tmp15 = *s; s+=tmpStride;\
  1271. OP2(*d, (tmp12+tmp13)*20 - (tmp11+tmp14)*5 + (tmp10+tmp15));d+=dstStride;\
  1272. tmp16 = *s; s+=tmpStride;\
  1273. OP2(*d, (tmp13+tmp14)*20 - (tmp12+tmp15)*5 + (tmp11+tmp16));d+=dstStride;\
  1274. tmp17 = *s; s+=tmpStride;\
  1275. OP2(*d, (tmp14+tmp15)*20 - (tmp13+tmp16)*5 + (tmp12+tmp17));d+=dstStride;\
  1276. tmp18 = *s; s+=tmpStride;\
  1277. OP2(*d, (tmp15+tmp16)*20 - (tmp14+tmp17)*5 + (tmp13+tmp18));d+=dstStride;\
  1278. } \
  1279. } \
  1280. dst++;\
  1281. tmp++;\
  1282. }while(--i);\
  1283. }\
  1284. \
/* Fixed-size (4/8/16) entry points over the generic w/h lowpass filters. */ \
  1285. static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1286. OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,4,4); \
  1287. }\
  1288. static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1289. OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,8,8); \
  1290. }\
  1291. static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1292. OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,16,16); \
  1293. }\
  1294. \
  1295. static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1296. OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,4,4); \
  1297. }\
  1298. static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1299. OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,8,8); \
  1300. }\
  1301. static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1302. OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,16,16); \
  1303. }\
  1304. static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  1305. OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,4,4); \
  1306. }\
  1307. static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  1308. OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,8,8); \
  1309. }\
  1310. static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  1311. OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,16,16); \
  1312. } /* NOTE(review): the trailing '\' on this line continues straight into the next #define in this rendering -- confirm the real file has a separating blank line here */\
/* H.264 quarter-pel MC generator: one _mcXY_c function set per block SIZE,
 * assembled from the lowpass filters above plus the pixel copy/average
 * helpers.  XY is the quarter-pel phase as in the MPEG-4 set.  'full' holds
 * SIZE+5 padded source rows; full_mid skips the 2-row top pad.  (This macro
 * continues past the end of this excerpt.) */
  1313. #define H264_MC(OPNAME, SIZE) \
  1314. static void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
  1315. OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE); /* integer-pel */\
  1316. }\
  1317. \
  1318. static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
  1319. uint8_t half[SIZE*SIZE];\
  1320. put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
  1321. OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src, half, stride, stride, SIZE, SIZE);\
  1322. }\
  1323. \
  1324. static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
  1325. OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
  1326. }\
  1327. \
  1328. static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
  1329. uint8_t half[SIZE*SIZE];\
  1330. put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
  1331. OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src+1, half, stride, stride, SIZE, SIZE);\
  1332. }\
  1333. \
  1334. static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
  1335. uint8_t full[SIZE*(SIZE+5)]; /* SIZE+5 rows: 2 above + 3 below for the 6-tap filter */\
  1336. uint8_t * const full_mid= full + SIZE*2; /* first "real" row, past the top pad */\
  1337. uint8_t half[SIZE*SIZE];\
  1338. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1339. put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
  1340. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
  1341. }\
  1342. \
  1343. static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
  1344. uint8_t full[SIZE*(SIZE+5)];\
  1345. uint8_t * const full_mid= full + SIZE*2;\
  1346. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1347. OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
  1348. }\
  1349. \
  1350. static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
  1351. uint8_t full[SIZE*(SIZE+5)];\
  1352. uint8_t * const full_mid= full + SIZE*2;\
  1353. uint8_t half[SIZE*SIZE];\
  1354. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1355. put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
  1356. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
  1357. }\
  1358. \
  1359. static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){ /* diagonals: average the h-filtered and v-filtered halfpel planes */\
  1360. uint8_t full[SIZE*(SIZE+5)];\
  1361. uint8_t * const full_mid= full + SIZE*2;\
  1362. uint8_t halfH[SIZE*SIZE];\
  1363. uint8_t halfV[SIZE*SIZE];\
  1364. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1365. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1366. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1367. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1368. }\
  1369. \
  1370. static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
  1371. uint8_t full[SIZE*(SIZE+5)];\
  1372. uint8_t * const full_mid= full + SIZE*2;\
  1373. uint8_t halfH[SIZE*SIZE];\
  1374. uint8_t halfV[SIZE*SIZE];\
  1375. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1376. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5); /* +1: right integer column */\
  1377. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1378. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1379. }\
  1380. \
  1381. static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
  1382. uint8_t full[SIZE*(SIZE+5)];\
  1383. uint8_t * const full_mid= full + SIZE*2;\
  1384. uint8_t halfH[SIZE*SIZE];\
  1385. uint8_t halfV[SIZE*SIZE];\
  1386. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride); /* +stride: next row down */\
  1387. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1388. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1389. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1390. }\
  1391. \
  1392. static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
  1393. uint8_t full[SIZE*(SIZE+5)];\
  1394. uint8_t * const full_mid= full + SIZE*2;\
  1395. uint8_t halfH[SIZE*SIZE];\
  1396. uint8_t halfV[SIZE*SIZE];\
  1397. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1398. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1399. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1400. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
  1401. }\
  1402. \
  1403. static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
  1404. int16_t tmp[SIZE*(SIZE+5)]; /* int16 intermediate for the 2-D filter */\
  1405. OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
  1406. }\
  1407. \
  1408. static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
  1409. int16_t tmp[SIZE*(SIZE+5)];\
  1410. uint8_t halfH[SIZE*SIZE];\
  1411. uint8_t halfHV[SIZE*SIZE];\
  1412. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
  1413. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1414. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
  1415. }\
  1416. \
  1417. static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
  1418. int16_t tmp[SIZE*(SIZE+5)];\
  1419. uint8_t halfH[SIZE*SIZE];\
  1420. uint8_t halfHV[SIZE*SIZE];\
  1421. put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
  1422. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1423. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
  1424. }\
  1425. \
  1426. static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
  1427. uint8_t full[SIZE*(SIZE+5)];\
  1428. uint8_t * const full_mid= full + SIZE*2;\
  1429. int16_t tmp[SIZE*(SIZE+5)];\
  1430. uint8_t halfV[SIZE*SIZE];\
  1431. uint8_t halfHV[SIZE*SIZE];\
  1432. copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
  1433. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1434. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1435. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
  1436. }\
  1437. \
  1438. static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
  1439. uint8_t full[SIZE*(SIZE+5)];\
  1440. uint8_t * const full_mid= full + SIZE*2;\
  1441. int16_t tmp[SIZE*(SIZE+5)];\
  1442. uint8_t halfV[SIZE*SIZE];\
  1443. uint8_t halfHV[SIZE*SIZE];\
  1444. copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
  1445. put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
  1446. put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
  1447. OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
  1448. }\
/* Pixel-store primitives plugged into H264_LOWPASS/H264_MC:
 *   op_put  stores the clipped filter result (1-D filter, >>5 with rounding),
 *   op_avg  additionally averages with the existing destination pixel,
 *           rounding up.
 * The op2_* variants serve the 2-D (H then V) filter, whose intermediate
 * values carry 10 extra fraction bits (>>10 with rounding).
 * cm is the clip table in scope at each macro expansion site. */
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
#define op2_put(a, b) a = cm[((b) + 512)>>10]
/* Instantiate the H.264 6-tap lowpass filters and all 16 quarter-pel
 * MC functions for both "put" and "avg" at block sizes 4, 8 and 16. */
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)
/* Undefine the ops so later code cannot expand them by accident. */
#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
  1466. #endif
  1467. static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
  1468. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1469. do{
  1470. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  1471. uint8_t *s = src;
  1472. src_1 = s[-1];
  1473. src0 = *s++;
  1474. src1 = *s++;
  1475. src2 = *s++;
  1476. dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
  1477. src3 = *s++;
  1478. dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
  1479. src4 = *s++;
  1480. dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
  1481. src5 = *s++;
  1482. dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
  1483. src6 = *s++;
  1484. dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
  1485. src7 = *s++;
  1486. dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
  1487. src8 = *s++;
  1488. dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
  1489. src9 = *s++;
  1490. dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
  1491. dst+=dstStride;
  1492. src+=srcStride;
  1493. }while(--h);
  1494. }
  1495. static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
  1496. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1497. do{
  1498. int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
  1499. uint8_t *s = src,*d = dst;
  1500. src_1 = *(s-srcStride);
  1501. src0 = *s; s+=srcStride;
  1502. src1 = *s; s+=srcStride;
  1503. src2 = *s; s+=srcStride;
  1504. *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
  1505. src3 = *s; s+=srcStride;
  1506. *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
  1507. src4 = *s; s+=srcStride;
  1508. *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
  1509. src5 = *s; s+=srcStride;
  1510. *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
  1511. src6 = *s; s+=srcStride;
  1512. *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
  1513. src7 = *s; s+=srcStride;
  1514. *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
  1515. src8 = *s; s+=srcStride;
  1516. *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
  1517. src9 = *s;
  1518. *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
  1519. src++;
  1520. dst++;
  1521. }while(--w);
  1522. }
/* mspel position (0,0): integer-pel, plain 8x8 block copy — no filtering. */
static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
    put_pixels8_c(dst, src, stride, 8);
}
  1526. static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
  1527. uint8_t half[64];
  1528. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1529. put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
  1530. }
/* mspel position (2,0): horizontal half-pel — filter straight into dst. */
static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}
  1534. static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
  1535. uint8_t half[64];
  1536. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1537. put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
  1538. }
/* mspel position (0,2): vertical half-pel — filter straight into dst. */
static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}
  1542. static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
  1543. uint8_t halfH[88];
  1544. uint8_t halfV[64];
  1545. uint8_t halfHV[64];
  1546. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1547. wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
  1548. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  1549. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  1550. }
  1551. static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
  1552. uint8_t halfH[88];
  1553. uint8_t halfV[64];
  1554. uint8_t halfHV[64];
  1555. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1556. wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
  1557. wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
  1558. put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
  1559. }
  1560. static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
  1561. uint8_t halfH[88];
  1562. wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
  1563. wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
  1564. }