You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1350 lines
54KB

  1. /*
  2. * DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of Libav.
  9. *
  10. * Libav is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * Libav is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with Libav; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * DSP utils
  27. */
  28. #include "dsputil.h"
  29. #define BIT_DEPTH 8
  30. #define pixel uint8_t
  31. #define pixel2 uint16_t
  32. #define pixel4 uint32_t
  33. #define dctcoef int16_t
  34. #define FUNC(a) a
  35. #define FUNCC(a) a ## _c
  36. #define INIT_CLIP uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  37. #define CLIP(a) cm[a]
  38. #define AV_RN2P AV_RN16
  39. #define AV_RN4P AV_RN32
  40. #define PIXEL_MAX ((1<<BIT_DEPTH)-1)
  41. #define no_rnd_avg_pixel4 no_rnd_avg32
  42. #define rnd_avg_pixel4 rnd_avg32
  43. /* draw the edges of width 'w' of an image of size width, height */
  44. //FIXME check that this is ok for mpeg4 interlaced
  45. static void FUNCC(draw_edges)(uint8_t *_buf, int _wrap, int width, int height, int w, int sides)
  46. {
  47. pixel *buf = (pixel*)_buf;
  48. int wrap = _wrap / sizeof(pixel);
  49. pixel *ptr, *last_line;
  50. int i;
  51. /* left and right */
  52. ptr = buf;
  53. for(i=0;i<height;i++) {
  54. #if BIT_DEPTH > 8
  55. int j;
  56. for (j = 0; j < w; j++) {
  57. ptr[j-w] = ptr[0];
  58. ptr[j+width] = ptr[width-1];
  59. }
  60. #else
  61. memset(ptr - w, ptr[0], w);
  62. memset(ptr + width, ptr[width-1], w);
  63. #endif
  64. ptr += wrap;
  65. }
  66. /* top and bottom + corners */
  67. buf -= w;
  68. last_line = buf + (height - 1) * wrap;
  69. if (sides & EDGE_TOP)
  70. for(i = 0; i < w; i++)
  71. memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
  72. if (sides & EDGE_BOTTOM)
  73. for (i = 0; i < w; i++)
  74. memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
  75. }
  76. /**
  77. * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
  78. * @param buf destination buffer
  79. * @param src source buffer
  80. * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
  81. * @param block_w width of block
  82. * @param block_h height of block
  83. * @param src_x x coordinate of the top left sample of the block in the source buffer
  84. * @param src_y y coordinate of the top left sample of the block in the source buffer
  85. * @param w width of the source buffer
  86. * @param h height of the source buffer
  87. */
  88. void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
  89. int src_x, int src_y, int w, int h){
  90. int x, y;
  91. int start_y, start_x, end_y, end_x;
  92. if(src_y>= h){
  93. src+= (h-1-src_y)*linesize;
  94. src_y=h-1;
  95. }else if(src_y<=-block_h){
  96. src+= (1-block_h-src_y)*linesize;
  97. src_y=1-block_h;
  98. }
  99. if(src_x>= w){
  100. src+= (w-1-src_x)*sizeof(pixel);
  101. src_x=w-1;
  102. }else if(src_x<=-block_w){
  103. src+= (1-block_w-src_x)*sizeof(pixel);
  104. src_x=1-block_w;
  105. }
  106. start_y= FFMAX(0, -src_y);
  107. start_x= FFMAX(0, -src_x);
  108. end_y= FFMIN(block_h, h-src_y);
  109. end_x= FFMIN(block_w, w-src_x);
  110. assert(start_y < end_y && block_h);
  111. assert(start_x < end_x && block_w);
  112. w = end_x - start_x;
  113. src += start_y*linesize + start_x*sizeof(pixel);
  114. buf += start_x*sizeof(pixel);
  115. //top
  116. for(y=0; y<start_y; y++){
  117. memcpy(buf, src, w*sizeof(pixel));
  118. buf += linesize;
  119. }
  120. // copy existing part
  121. for(; y<end_y; y++){
  122. memcpy(buf, src, w*sizeof(pixel));
  123. src += linesize;
  124. buf += linesize;
  125. }
  126. //bottom
  127. src -= linesize;
  128. for(; y<block_h; y++){
  129. memcpy(buf, src, w*sizeof(pixel));
  130. buf += linesize;
  131. }
  132. buf -= block_h * linesize + start_x*sizeof(pixel);
  133. while (block_h--){
  134. pixel *bufp = (pixel*)buf;
  135. //left
  136. for(x=0; x<start_x; x++){
  137. bufp[x] = bufp[start_x];
  138. }
  139. //right
  140. for(x=end_x; x<block_w; x++){
  141. bufp[x] = bufp[end_x - 1];
  142. }
  143. buf += linesize;
  144. }
  145. }
  146. static void FUNCC(add_pixels8)(uint8_t *restrict _pixels, DCTELEM *_block, int line_size)
  147. {
  148. int i;
  149. pixel *restrict pixels = (pixel *restrict)_pixels;
  150. dctcoef *block = (dctcoef*)_block;
  151. line_size /= sizeof(pixel);
  152. for(i=0;i<8;i++) {
  153. pixels[0] += block[0];
  154. pixels[1] += block[1];
  155. pixels[2] += block[2];
  156. pixels[3] += block[3];
  157. pixels[4] += block[4];
  158. pixels[5] += block[5];
  159. pixels[6] += block[6];
  160. pixels[7] += block[7];
  161. pixels += line_size;
  162. block += 8;
  163. }
  164. }
  165. static void FUNCC(add_pixels4)(uint8_t *restrict _pixels, DCTELEM *_block, int line_size)
  166. {
  167. int i;
  168. pixel *restrict pixels = (pixel *restrict)_pixels;
  169. dctcoef *block = (dctcoef*)_block;
  170. line_size /= sizeof(pixel);
  171. for(i=0;i<4;i++) {
  172. pixels[0] += block[0];
  173. pixels[1] += block[1];
  174. pixels[2] += block[2];
  175. pixels[3] += block[3];
  176. pixels += line_size;
  177. block += 4;
  178. }
  179. }
  180. #if 0
  181. #define PIXOP2(OPNAME, OP) \
  182. static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  183. {\
  184. int i;\
  185. for(i=0; i<h; i++){\
  186. OP(*((uint64_t*)block), AV_RN64(pixels));\
  187. pixels+=line_size;\
  188. block +=line_size;\
  189. }\
  190. }\
  191. \
  192. static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  193. {\
  194. int i;\
  195. for(i=0; i<h; i++){\
  196. const uint64_t a= AV_RN64(pixels );\
  197. const uint64_t b= AV_RN64(pixels+1);\
  198. OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
  199. pixels+=line_size;\
  200. block +=line_size;\
  201. }\
  202. }\
  203. \
  204. static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  205. {\
  206. int i;\
  207. for(i=0; i<h; i++){\
  208. const uint64_t a= AV_RN64(pixels );\
  209. const uint64_t b= AV_RN64(pixels+1);\
  210. OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
  211. pixels+=line_size;\
  212. block +=line_size;\
  213. }\
  214. }\
  215. \
  216. static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  217. {\
  218. int i;\
  219. for(i=0; i<h; i++){\
  220. const uint64_t a= AV_RN64(pixels );\
  221. const uint64_t b= AV_RN64(pixels+line_size);\
  222. OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
  223. pixels+=line_size;\
  224. block +=line_size;\
  225. }\
  226. }\
  227. \
  228. static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  229. {\
  230. int i;\
  231. for(i=0; i<h; i++){\
  232. const uint64_t a= AV_RN64(pixels );\
  233. const uint64_t b= AV_RN64(pixels+line_size);\
  234. OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
  235. pixels+=line_size;\
  236. block +=line_size;\
  237. }\
  238. }\
  239. \
  240. static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  241. {\
  242. int i;\
  243. const uint64_t a= AV_RN64(pixels );\
  244. const uint64_t b= AV_RN64(pixels+1);\
  245. uint64_t l0= (a&0x0303030303030303ULL)\
  246. + (b&0x0303030303030303ULL)\
  247. + 0x0202020202020202ULL;\
  248. uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
  249. + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
  250. uint64_t l1,h1;\
  251. \
  252. pixels+=line_size;\
  253. for(i=0; i<h; i+=2){\
  254. uint64_t a= AV_RN64(pixels );\
  255. uint64_t b= AV_RN64(pixels+1);\
  256. l1= (a&0x0303030303030303ULL)\
  257. + (b&0x0303030303030303ULL);\
  258. h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
  259. + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
  260. OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
  261. pixels+=line_size;\
  262. block +=line_size;\
  263. a= AV_RN64(pixels );\
  264. b= AV_RN64(pixels+1);\
  265. l0= (a&0x0303030303030303ULL)\
  266. + (b&0x0303030303030303ULL)\
  267. + 0x0202020202020202ULL;\
  268. h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
  269. + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
  270. OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
  271. pixels+=line_size;\
  272. block +=line_size;\
  273. }\
  274. }\
  275. \
  276. static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  277. {\
  278. int i;\
  279. const uint64_t a= AV_RN64(pixels );\
  280. const uint64_t b= AV_RN64(pixels+1);\
  281. uint64_t l0= (a&0x0303030303030303ULL)\
  282. + (b&0x0303030303030303ULL)\
  283. + 0x0101010101010101ULL;\
  284. uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
  285. + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
  286. uint64_t l1,h1;\
  287. \
  288. pixels+=line_size;\
  289. for(i=0; i<h; i+=2){\
  290. uint64_t a= AV_RN64(pixels );\
  291. uint64_t b= AV_RN64(pixels+1);\
  292. l1= (a&0x0303030303030303ULL)\
  293. + (b&0x0303030303030303ULL);\
  294. h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
  295. + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
  296. OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
  297. pixels+=line_size;\
  298. block +=line_size;\
  299. a= AV_RN64(pixels );\
  300. b= AV_RN64(pixels+1);\
  301. l0= (a&0x0303030303030303ULL)\
  302. + (b&0x0303030303030303ULL)\
  303. + 0x0101010101010101ULL;\
  304. h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
  305. + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
  306. OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
  307. pixels+=line_size;\
  308. block +=line_size;\
  309. }\
  310. }\
  311. \
  312. CALL_2X_PIXELS(OPNAME ## _pixels16_c , OPNAME ## _pixels_c , 8*sizeof(pixel))\
  313. CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8*sizeof(pixel))\
  314. CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8*sizeof(pixel))\
  315. CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8*sizeof(pixel))\
  316. CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8*sizeof(pixel))\
  317. CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8*sizeof(pixel))\
  318. CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8*sizeof(pixel))
  319. #define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
  320. #else // 64 bit variant
  321. #define PIXOP2(OPNAME, OP) \
  322. static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  323. int i;\
  324. for(i=0; i<h; i++){\
  325. OP(*((pixel2*)(block )), AV_RN2P(pixels ));\
  326. pixels+=line_size;\
  327. block +=line_size;\
  328. }\
  329. }\
  330. static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  331. int i;\
  332. for(i=0; i<h; i++){\
  333. OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
  334. pixels+=line_size;\
  335. block +=line_size;\
  336. }\
  337. }\
  338. static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  339. int i;\
  340. for(i=0; i<h; i++){\
  341. OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
  342. OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
  343. pixels+=line_size;\
  344. block +=line_size;\
  345. }\
  346. }\
  347. static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  348. FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
  349. }\
  350. \
  351. static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
  352. int src_stride1, int src_stride2, int h){\
  353. int i;\
  354. for(i=0; i<h; i++){\
  355. pixel4 a,b;\
  356. a= AV_RN4P(&src1[i*src_stride1 ]);\
  357. b= AV_RN4P(&src2[i*src_stride2 ]);\
  358. OP(*((pixel4*)&dst[i*dst_stride ]), no_rnd_avg_pixel4(a, b));\
  359. a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
  360. b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
  361. OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
  362. }\
  363. }\
  364. \
  365. static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
  366. int src_stride1, int src_stride2, int h){\
  367. int i;\
  368. for(i=0; i<h; i++){\
  369. pixel4 a,b;\
  370. a= AV_RN4P(&src1[i*src_stride1 ]);\
  371. b= AV_RN4P(&src2[i*src_stride2 ]);\
  372. OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
  373. a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
  374. b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
  375. OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
  376. }\
  377. }\
  378. \
  379. static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
  380. int src_stride1, int src_stride2, int h){\
  381. int i;\
  382. for(i=0; i<h; i++){\
  383. pixel4 a,b;\
  384. a= AV_RN4P(&src1[i*src_stride1 ]);\
  385. b= AV_RN4P(&src2[i*src_stride2 ]);\
  386. OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
  387. }\
  388. }\
  389. \
  390. static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
  391. int src_stride1, int src_stride2, int h){\
  392. int i;\
  393. for(i=0; i<h; i++){\
  394. pixel4 a,b;\
  395. a= AV_RN2P(&src1[i*src_stride1 ]);\
  396. b= AV_RN2P(&src2[i*src_stride2 ]);\
  397. OP(*((pixel2*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
  398. }\
  399. }\
  400. \
  401. static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
  402. int src_stride1, int src_stride2, int h){\
  403. FUNC(OPNAME ## _pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
  404. FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
  405. }\
  406. \
  407. static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
  408. int src_stride1, int src_stride2, int h){\
  409. FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
  410. FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
  411. }\
  412. \
  413. static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  414. FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
  415. }\
  416. \
  417. static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  418. FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
  419. }\
  420. \
  421. static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  422. FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
  423. }\
  424. \
  425. static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  426. FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
  427. }\
  428. \
  429. static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
  430. int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  431. /* FIXME HIGH BIT DEPTH */\
  432. int i;\
  433. for(i=0; i<h; i++){\
  434. uint32_t a, b, c, d, l0, l1, h0, h1;\
  435. a= AV_RN32(&src1[i*src_stride1]);\
  436. b= AV_RN32(&src2[i*src_stride2]);\
  437. c= AV_RN32(&src3[i*src_stride3]);\
  438. d= AV_RN32(&src4[i*src_stride4]);\
  439. l0= (a&0x03030303UL)\
  440. + (b&0x03030303UL)\
  441. + 0x02020202UL;\
  442. h0= ((a&0xFCFCFCFCUL)>>2)\
  443. + ((b&0xFCFCFCFCUL)>>2);\
  444. l1= (c&0x03030303UL)\
  445. + (d&0x03030303UL);\
  446. h1= ((c&0xFCFCFCFCUL)>>2)\
  447. + ((d&0xFCFCFCFCUL)>>2);\
  448. OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  449. a= AV_RN32(&src1[i*src_stride1+4]);\
  450. b= AV_RN32(&src2[i*src_stride2+4]);\
  451. c= AV_RN32(&src3[i*src_stride3+4]);\
  452. d= AV_RN32(&src4[i*src_stride4+4]);\
  453. l0= (a&0x03030303UL)\
  454. + (b&0x03030303UL)\
  455. + 0x02020202UL;\
  456. h0= ((a&0xFCFCFCFCUL)>>2)\
  457. + ((b&0xFCFCFCFCUL)>>2);\
  458. l1= (c&0x03030303UL)\
  459. + (d&0x03030303UL);\
  460. h1= ((c&0xFCFCFCFCUL)>>2)\
  461. + ((d&0xFCFCFCFCUL)>>2);\
  462. OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  463. }\
  464. }\
  465. \
  466. static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  467. FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
  468. }\
  469. \
  470. static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  471. FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
  472. }\
  473. \
  474. static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  475. FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
  476. }\
  477. \
  478. static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
  479. FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
  480. }\
  481. \
  482. static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
  483. int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  484. /* FIXME HIGH BIT DEPTH*/\
  485. int i;\
  486. for(i=0; i<h; i++){\
  487. uint32_t a, b, c, d, l0, l1, h0, h1;\
  488. a= AV_RN32(&src1[i*src_stride1]);\
  489. b= AV_RN32(&src2[i*src_stride2]);\
  490. c= AV_RN32(&src3[i*src_stride3]);\
  491. d= AV_RN32(&src4[i*src_stride4]);\
  492. l0= (a&0x03030303UL)\
  493. + (b&0x03030303UL)\
  494. + 0x01010101UL;\
  495. h0= ((a&0xFCFCFCFCUL)>>2)\
  496. + ((b&0xFCFCFCFCUL)>>2);\
  497. l1= (c&0x03030303UL)\
  498. + (d&0x03030303UL);\
  499. h1= ((c&0xFCFCFCFCUL)>>2)\
  500. + ((d&0xFCFCFCFCUL)>>2);\
  501. OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  502. a= AV_RN32(&src1[i*src_stride1+4]);\
  503. b= AV_RN32(&src2[i*src_stride2+4]);\
  504. c= AV_RN32(&src3[i*src_stride3+4]);\
  505. d= AV_RN32(&src4[i*src_stride4+4]);\
  506. l0= (a&0x03030303UL)\
  507. + (b&0x03030303UL)\
  508. + 0x01010101UL;\
  509. h0= ((a&0xFCFCFCFCUL)>>2)\
  510. + ((b&0xFCFCFCFCUL)>>2);\
  511. l1= (c&0x03030303UL)\
  512. + (d&0x03030303UL);\
  513. h1= ((c&0xFCFCFCFCUL)>>2)\
  514. + ((d&0xFCFCFCFCUL)>>2);\
  515. OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  516. }\
  517. }\
  518. static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
  519. int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  520. FUNC(OPNAME ## _pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
  521. FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
  522. }\
  523. static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
  524. int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
  525. FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
  526. FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
  527. }\
  528. \
  529. static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *_block, const uint8_t *_pixels, int line_size, int h)\
  530. {\
  531. int i, a0, b0, a1, b1;\
  532. pixel *block = (pixel*)_block;\
  533. const pixel *pixels = (const pixel*)_pixels;\
  534. line_size /= sizeof(pixel);\
  535. a0= pixels[0];\
  536. b0= pixels[1] + 2;\
  537. a0 += b0;\
  538. b0 += pixels[2];\
  539. \
  540. pixels+=line_size;\
  541. for(i=0; i<h; i+=2){\
  542. a1= pixels[0];\
  543. b1= pixels[1];\
  544. a1 += b1;\
  545. b1 += pixels[2];\
  546. \
  547. block[0]= (a1+a0)>>2; /* FIXME non put */\
  548. block[1]= (b1+b0)>>2;\
  549. \
  550. pixels+=line_size;\
  551. block +=line_size;\
  552. \
  553. a0= pixels[0];\
  554. b0= pixels[1] + 2;\
  555. a0 += b0;\
  556. b0 += pixels[2];\
  557. \
  558. block[0]= (a1+a0)>>2;\
  559. block[1]= (b1+b0)>>2;\
  560. pixels+=line_size;\
  561. block +=line_size;\
  562. }\
  563. }\
  564. \
  565. static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  566. {\
  567. /* FIXME HIGH BIT DEPTH */\
  568. int i;\
  569. const uint32_t a= AV_RN32(pixels );\
  570. const uint32_t b= AV_RN32(pixels+1);\
  571. uint32_t l0= (a&0x03030303UL)\
  572. + (b&0x03030303UL)\
  573. + 0x02020202UL;\
  574. uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
  575. + ((b&0xFCFCFCFCUL)>>2);\
  576. uint32_t l1,h1;\
  577. \
  578. pixels+=line_size;\
  579. for(i=0; i<h; i+=2){\
  580. uint32_t a= AV_RN32(pixels );\
  581. uint32_t b= AV_RN32(pixels+1);\
  582. l1= (a&0x03030303UL)\
  583. + (b&0x03030303UL);\
  584. h1= ((a&0xFCFCFCFCUL)>>2)\
  585. + ((b&0xFCFCFCFCUL)>>2);\
  586. OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  587. pixels+=line_size;\
  588. block +=line_size;\
  589. a= AV_RN32(pixels );\
  590. b= AV_RN32(pixels+1);\
  591. l0= (a&0x03030303UL)\
  592. + (b&0x03030303UL)\
  593. + 0x02020202UL;\
  594. h0= ((a&0xFCFCFCFCUL)>>2)\
  595. + ((b&0xFCFCFCFCUL)>>2);\
  596. OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  597. pixels+=line_size;\
  598. block +=line_size;\
  599. }\
  600. }\
  601. \
  602. static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  603. {\
  604. /* FIXME HIGH BIT DEPTH */\
  605. int j;\
  606. for(j=0; j<2; j++){\
  607. int i;\
  608. const uint32_t a= AV_RN32(pixels );\
  609. const uint32_t b= AV_RN32(pixels+1);\
  610. uint32_t l0= (a&0x03030303UL)\
  611. + (b&0x03030303UL)\
  612. + 0x02020202UL;\
  613. uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
  614. + ((b&0xFCFCFCFCUL)>>2);\
  615. uint32_t l1,h1;\
  616. \
  617. pixels+=line_size;\
  618. for(i=0; i<h; i+=2){\
  619. uint32_t a= AV_RN32(pixels );\
  620. uint32_t b= AV_RN32(pixels+1);\
  621. l1= (a&0x03030303UL)\
  622. + (b&0x03030303UL);\
  623. h1= ((a&0xFCFCFCFCUL)>>2)\
  624. + ((b&0xFCFCFCFCUL)>>2);\
  625. OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  626. pixels+=line_size;\
  627. block +=line_size;\
  628. a= AV_RN32(pixels );\
  629. b= AV_RN32(pixels+1);\
  630. l0= (a&0x03030303UL)\
  631. + (b&0x03030303UL)\
  632. + 0x02020202UL;\
  633. h0= ((a&0xFCFCFCFCUL)>>2)\
  634. + ((b&0xFCFCFCFCUL)>>2);\
  635. OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  636. pixels+=line_size;\
  637. block +=line_size;\
  638. }\
  639. pixels+=4-line_size*(h+1);\
  640. block +=4-line_size*h;\
  641. }\
  642. }\
  643. \
  644. static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
  645. {\
  646. /* FIXME HIGH BIT DEPTH */\
  647. int j;\
  648. for(j=0; j<2; j++){\
  649. int i;\
  650. const uint32_t a= AV_RN32(pixels );\
  651. const uint32_t b= AV_RN32(pixels+1);\
  652. uint32_t l0= (a&0x03030303UL)\
  653. + (b&0x03030303UL)\
  654. + 0x01010101UL;\
  655. uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
  656. + ((b&0xFCFCFCFCUL)>>2);\
  657. uint32_t l1,h1;\
  658. \
  659. pixels+=line_size;\
  660. for(i=0; i<h; i+=2){\
  661. uint32_t a= AV_RN32(pixels );\
  662. uint32_t b= AV_RN32(pixels+1);\
  663. l1= (a&0x03030303UL)\
  664. + (b&0x03030303UL);\
  665. h1= ((a&0xFCFCFCFCUL)>>2)\
  666. + ((b&0xFCFCFCFCUL)>>2);\
  667. OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  668. pixels+=line_size;\
  669. block +=line_size;\
  670. a= AV_RN32(pixels );\
  671. b= AV_RN32(pixels+1);\
  672. l0= (a&0x03030303UL)\
  673. + (b&0x03030303UL)\
  674. + 0x01010101UL;\
  675. h0= ((a&0xFCFCFCFCUL)>>2)\
  676. + ((b&0xFCFCFCFCUL)>>2);\
  677. OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
  678. pixels+=line_size;\
  679. block +=line_size;\
  680. }\
  681. pixels+=4-line_size*(h+1);\
  682. block +=4-line_size*h;\
  683. }\
  684. }\
  685. \
  686. CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
  687. CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
  688. CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
  689. CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
  690. av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
  691. CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
  692. CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
  693. CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\
  694. #define op_avg(a, b) a = rnd_avg_pixel4(a, b)
  695. #endif
  696. #define op_put(a, b) a = b
  697. PIXOP2(avg, op_avg)
  698. PIXOP2(put, op_put)
  699. #undef op_avg
  700. #undef op_put
  701. #define put_no_rnd_pixels8_c put_pixels8_c
  702. #define put_no_rnd_pixels16_c put_pixels16_c
  703. static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
  704. FUNC(put_no_rnd_pixels16_l2)(dst, a, b, stride, stride, stride, h);
  705. }
  706. static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
  707. FUNC(put_no_rnd_pixels8_l2)(dst, a, b, stride, stride, stride, h);
  708. }
  709. #define H264_CHROMA_MC(OPNAME, OP)\
  710. static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
  711. pixel *dst = (pixel*)_dst;\
  712. pixel *src = (pixel*)_src;\
  713. const int A=(8-x)*(8-y);\
  714. const int B=( x)*(8-y);\
  715. const int C=(8-x)*( y);\
  716. const int D=( x)*( y);\
  717. int i;\
  718. stride /= sizeof(pixel);\
  719. \
  720. assert(x<8 && y<8 && x>=0 && y>=0);\
  721. \
  722. if(D){\
  723. for(i=0; i<h; i++){\
  724. OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
  725. OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
  726. dst+= stride;\
  727. src+= stride;\
  728. }\
  729. }else{\
  730. const int E= B+C;\
  731. const int step= C ? stride : 1;\
  732. for(i=0; i<h; i++){\
  733. OP(dst[0], (A*src[0] + E*src[step+0]));\
  734. OP(dst[1], (A*src[1] + E*src[step+1]));\
  735. dst+= stride;\
  736. src+= stride;\
  737. }\
  738. }\
  739. }\
  740. \
  741. static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
  742. pixel *dst = (pixel*)_dst;\
  743. pixel *src = (pixel*)_src;\
  744. const int A=(8-x)*(8-y);\
  745. const int B=( x)*(8-y);\
  746. const int C=(8-x)*( y);\
  747. const int D=( x)*( y);\
  748. int i;\
  749. stride /= sizeof(pixel);\
  750. \
  751. assert(x<8 && y<8 && x>=0 && y>=0);\
  752. \
  753. if(D){\
  754. for(i=0; i<h; i++){\
  755. OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
  756. OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
  757. OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
  758. OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
  759. dst+= stride;\
  760. src+= stride;\
  761. }\
  762. }else{\
  763. const int E= B+C;\
  764. const int step= C ? stride : 1;\
  765. for(i=0; i<h; i++){\
  766. OP(dst[0], (A*src[0] + E*src[step+0]));\
  767. OP(dst[1], (A*src[1] + E*src[step+1]));\
  768. OP(dst[2], (A*src[2] + E*src[step+2]));\
  769. OP(dst[3], (A*src[3] + E*src[step+3]));\
  770. dst+= stride;\
  771. src+= stride;\
  772. }\
  773. }\
  774. }\
  775. \
  776. static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
  777. pixel *dst = (pixel*)_dst;\
  778. pixel *src = (pixel*)_src;\
  779. const int A=(8-x)*(8-y);\
  780. const int B=( x)*(8-y);\
  781. const int C=(8-x)*( y);\
  782. const int D=( x)*( y);\
  783. int i;\
  784. stride /= sizeof(pixel);\
  785. \
  786. assert(x<8 && y<8 && x>=0 && y>=0);\
  787. \
  788. if(D){\
  789. for(i=0; i<h; i++){\
  790. OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
  791. OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
  792. OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
  793. OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
  794. OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
  795. OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
  796. OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
  797. OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
  798. dst+= stride;\
  799. src+= stride;\
  800. }\
  801. }else{\
  802. const int E= B+C;\
  803. const int step= C ? stride : 1;\
  804. for(i=0; i<h; i++){\
  805. OP(dst[0], (A*src[0] + E*src[step+0]));\
  806. OP(dst[1], (A*src[1] + E*src[step+1]));\
  807. OP(dst[2], (A*src[2] + E*src[step+2]));\
  808. OP(dst[3], (A*src[3] + E*src[step+3]));\
  809. OP(dst[4], (A*src[4] + E*src[step+4]));\
  810. OP(dst[5], (A*src[5] + E*src[step+5]));\
  811. OP(dst[6], (A*src[6] + E*src[step+6]));\
  812. OP(dst[7], (A*src[7] + E*src[step+7]));\
  813. dst+= stride;\
  814. src+= stride;\
  815. }\
  816. }\
  817. }
/* Chroma MC store ops: the bilinear weights A+B+C+D sum to 64, so the
 * accumulated value carries 6 fractional bits -> round with +32, shift
 * by 6.  op_avg additionally averages with the existing destination
 * pixel, rounding up (the trailing +1). */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
/* Instantiate the put_/avg_ variants of the chroma MC functions. */
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
/*
 * H264_LOWPASS(OPNAME, OP, OP2) generates the H.264 six-tap
 * (1,-5,20,20,-5,1) quarter-pel interpolation kernels for block widths
 * 2, 4, 8 and 16:
 *   *_h_lowpass  - horizontal filtering
 *   *_v_lowpass  - vertical filtering
 *   *_hv_lowpass - horizontal pass into a 16-bit 'tmp' buffer, then a
 *                  vertical pass over tmp, stored through OP2 (which
 *                  rescales the doubled fractional precision, >>10).
 * OP/OP2 are the rounding store macros (op_put/op_avg, op2_put/op2_avg).
 * Stride arguments are in bytes and converted to pixel units on entry.
 * NOTE(review): 'pad' biases the tmp intermediates when BIT_DEPTH > 9 so
 * they stay representable in int16_t — confirm for high-bit-depth builds.
 */
#define H264_LOWPASS(OPNAME, OP, OP2) \
/* width-2 horizontal 6-tap filter */ \
static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int h=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
/* width-2 vertical 6-tap filter */ \
static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int w=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
dst++;\
src++;\
}\
}\
\
/* width-2 separable 2D filter: horizontal into tmp, then vertical, OP2 store */ \
static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
const int h=2;\
const int w=2;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2);\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
dst++;\
tmp++;\
}\
}\
/* width-4 horizontal 6-tap filter */ \
static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int h=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
/* width-4 vertical 6-tap filter */ \
static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int w=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
const int src5= src[5 *srcStride];\
const int src6= src[6 *srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
dst++;\
src++;\
}\
}\
\
/* width-4 separable 2D filter */ \
static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
const int h=4;\
const int w=4;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2);\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
const int tmp5= tmp[5 *tmpStride] - pad;\
const int tmp6= tmp[6 *tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
dst++;\
tmp++;\
}\
}\
\
/* width-8 horizontal 6-tap filter */ \
static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int h=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
/* width-8 vertical 6-tap filter */ \
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int w=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
const int src5= src[5 *srcStride];\
const int src6= src[6 *srcStride];\
const int src7= src[7 *srcStride];\
const int src8= src[8 *srcStride];\
const int src9= src[9 *srcStride];\
const int src10=src[10*srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
dst++;\
src++;\
}\
}\
\
/* width-8 separable 2D filter */ \
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
const int h=8;\
const int w=8;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2);\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
const int tmp5= tmp[5 *tmpStride] - pad;\
const int tmp6= tmp[6 *tmpStride] - pad;\
const int tmp7= tmp[7 *tmpStride] - pad;\
const int tmp8= tmp[8 *tmpStride] - pad;\
const int tmp9= tmp[9 *tmpStride] - pad;\
const int tmp10=tmp[10*tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
dst++;\
tmp++;\
}\
}\
\
/* 16-wide variants: four 8-wide calls in a 2x2 tiling */ \
static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
\
static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
\
static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
}\

/*
 * H264_MC(OPNAME, SIZE) generates the 16 quarter-pel motion compensation
 * entry points for a SIZExSIZE luma block.  _mcXY encodes the fractional
 * position: X and Y are the horizontal/vertical offsets in quarter
 * samples (0..3).  Half-pel positions come straight from the lowpass
 * filters above; quarter-pel positions average two neighbouring
 * full/half-pel planes via the *pixels*_l2 helpers.  'full' is a copy of
 * the source with two rows of context above and three below, as needed
 * by the 6-tap vertical filter (full_mid points at the original row 0).
 */
#define H264_MC(OPNAME, SIZE) \
static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\

/* Qpel store ops: single-pass filters carry 5 fractional bits (+16,
 * >>5); the two-pass hv filters carry 10 (+512, >>10).  CLIP saturates
 * the result to the valid pixel range. */
#define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = CLIP(((b) + 16)>>5)
#define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
#define op2_put(a, b) a = CLIP(((b) + 512)>>10)
/* Instantiate the filter kernels, then the MC entry points per size.
 * NOTE(review): there is no H264_MC(avg_, 2) instantiation; presumably
 * 2x2 blocks are only ever needed for put — confirm against callers. */
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 2)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)
#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
/* The integer-pel (mc00) cases for 8x8/16x16 map directly onto the
 * plain fixed-size pixel copy/average wrappers below. */
#define put_h264_qpel8_mc00_c ff_put_pixels8x8_c
#define avg_h264_qpel8_mc00_c ff_avg_pixels8x8_c
#define put_h264_qpel16_mc00_c ff_put_pixels16x16_c
#define avg_h264_qpel16_mc00_c ff_avg_pixels16x16_c
/* Fixed-size 8x8 copy wrapper around the variable-height put_pixels8. */
void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(put_pixels8)(dst, src, stride, 8);
}
/* Fixed-size 8x8 average wrapper around the variable-height avg_pixels8. */
void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(avg_pixels8)(dst, src, stride, 8);
}
/* Fixed-size 16x16 copy wrapper around the variable-height put_pixels16. */
void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(put_pixels16)(dst, src, stride, 16);
}
/* Fixed-size 16x16 average wrapper around the variable-height avg_pixels16. */
void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(avg_pixels16)(dst, src, stride, 16);
}
  1296. static void FUNCC(clear_block)(DCTELEM *block)
  1297. {
  1298. memset(block, 0, sizeof(dctcoef)*64);
  1299. }
  1300. /**
  1301. * memset(blocks, 0, sizeof(DCTELEM)*6*64)
  1302. */
  1303. static void FUNCC(clear_blocks)(DCTELEM *blocks)
  1304. {
  1305. memset(blocks, 0, sizeof(dctcoef)*6*64);
  1306. }