You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1291 lines
55KB

  1. /*
  2. * DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of FFmpeg.
  9. *
  10. * FFmpeg is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * FFmpeg is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with FFmpeg; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * DSP utils
  27. */
  28. #include "bit_depth_template.c"
  29. static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  30. {
  31. int i;
  32. for(i=0; i<h; i++)
  33. {
  34. AV_WN2P(dst , AV_RN2P(src ));
  35. dst+=dstStride;
  36. src+=srcStride;
  37. }
  38. }
  39. static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  40. {
  41. int i;
  42. for(i=0; i<h; i++)
  43. {
  44. AV_WN4P(dst , AV_RN4P(src ));
  45. dst+=dstStride;
  46. src+=srcStride;
  47. }
  48. }
  49. static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  50. {
  51. int i;
  52. for(i=0; i<h; i++)
  53. {
  54. AV_WN4P(dst , AV_RN4P(src ));
  55. AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
  56. dst+=dstStride;
  57. src+=srcStride;
  58. }
  59. }
  60. static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  61. {
  62. int i;
  63. for(i=0; i<h; i++)
  64. {
  65. AV_WN4P(dst , AV_RN4P(src ));
  66. AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
  67. AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
  68. AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
  69. dst+=dstStride;
  70. src+=srcStride;
  71. }
  72. }
  73. /* draw the edges of width 'w' of an image of size width, height */
  74. //FIXME check that this is ok for mpeg4 interlaced
/*
 * Replicate the picture borders outward: extend each row by w pixels on
 * the left and right, and (depending on 'sides') duplicate whole padded
 * rows h times above the top and/or below the bottom edge.
 * p_wrap is the line stride in bytes; the buffer must have been
 * allocated with enough margin for the written edge area.
 */
static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int h, int sides)
{
    pixel *buf = (pixel*)p_buf;
    int wrap = p_wrap / sizeof(pixel); /* byte stride -> pixel stride */
    pixel *ptr, *last_line;
    int i;
    /* left and right: replicate the first/last sample of every row */
    ptr = buf;
    for(i=0;i<height;i++) {
#if BIT_DEPTH > 8
    /* multi-byte pixels: memset cannot splat them, copy one by one */
    int j;
    for (j = 0; j < w; j++) {
        ptr[j-w] = ptr[0];
        ptr[j+width] = ptr[width-1];
    }
#else
    memset(ptr - w, ptr[0], w);
    memset(ptr + width, ptr[width-1], w);
#endif
    ptr += wrap;
    }
    /* top and bottom + corners: rows are already width+2*w wide after the
     * pass above, so copying whole padded rows also fills the corners */
    buf -= w;
    last_line = buf + (height - 1) * wrap;
    if (sides & EDGE_TOP)
        for(i = 0; i < h; i++)
            memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
    if (sides & EDGE_BOTTOM)
        for (i = 0; i < h; i++)
            memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
}
  106. /**
  107. * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
  108. * @param buf destination buffer
  109. * @param src source buffer
  110. * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
  111. * @param block_w width of block
  112. * @param block_h height of block
  113. * @param src_x x coordinate of the top left sample of the block in the source buffer
  114. * @param src_y y coordinate of the top left sample of the block in the source buffer
  115. * @param w width of the source buffer
  116. * @param h height of the source buffer
  117. */
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
                               int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;
    /* If the block lies entirely outside the picture, move src so that at
     * least one row/column overlaps; the replication below then fills the
     * rest from that edge sample. */
    if(src_y>= h){
        src+= (h-1-src_y)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x)*sizeof(pixel);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x)*sizeof(pixel);
        src_x=1-block_w;
    }
    /* Part of the block covered by real picture data, in block coords. */
    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);
    av_assert2(start_y < end_y && block_h);
    av_assert2(start_x < end_x && block_w);
    w = end_x - start_x; /* reused: width of the valid middle strip */
    src += start_y*linesize + start_x*sizeof(pixel);
    buf += start_x*sizeof(pixel);
    /* top: replicate the first valid row upward (src not advanced yet) */
    for(y=0; y<start_y; y++){
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }
    /* copy existing part */
    for(; y<end_y; y++){
        memcpy(buf, src, w*sizeof(pixel));
        src += linesize;
        buf += linesize;
    }
    /* bottom: replicate the last valid row downward */
    src -= linesize;
    for(; y<block_h; y++){
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }
    /* rewind to the top of the block, then replicate left/right columns
     * of every row from the first/last valid sample */
    buf -= block_h * linesize + start_x*sizeof(pixel);
    while (block_h--){
        pixel *bufp = (pixel*)buf;
        /* left */
        for(x=0; x<start_x; x++){
            bufp[x] = bufp[start_x];
        }
        /* right */
        for(x=end_x; x<block_w; x++){
            bufp[x] = bufp[end_x - 1];
        }
        buf += linesize;
    }
}
/*
 * DCTELEM_FUNCS(dctcoef, suffix): instantiates the pixel<->DCT-coefficient
 * helpers for one coefficient width. 'dctcoef' is the coefficient type and
 * 'suffix' tags the generated names (_16 for 16-bit, _32 for 32-bit at
 * high bit depth). line_size is in bytes.
 */
#define DCTELEM_FUNCS(dctcoef, suffix) \
/* Read an 8x8 block of pixels into a flat 64-coefficient array. */ \
static void FUNCC(get_pixels ## suffix)(DCTELEM *restrict _block, \
                                        const uint8_t *_pixels, \
                                        int line_size) \
{ \
    const pixel *pixels = (const pixel *) _pixels; \
    dctcoef *restrict block = (dctcoef *) _block; \
    int i; \
 \
    /* read the pixels */ \
    for(i=0;i<8;i++) { \
        block[0] = pixels[0]; \
        block[1] = pixels[1]; \
        block[2] = pixels[2]; \
        block[3] = pixels[3]; \
        block[4] = pixels[4]; \
        block[5] = pixels[5]; \
        block[6] = pixels[6]; \
        block[7] = pixels[7]; \
        pixels += line_size / sizeof(pixel); \
        block += 8; \
    } \
} \
 \
/* Add an 8x8 coefficient block onto the destination pixels (no clip). */ \
static void FUNCC(add_pixels8 ## suffix)(uint8_t *restrict _pixels, \
                                         DCTELEM *_block, \
                                         int line_size) \
{ \
    int i; \
    pixel *restrict pixels = (pixel *restrict)_pixels; \
    dctcoef *block = (dctcoef*)_block; \
    line_size /= sizeof(pixel); \
 \
    for(i=0;i<8;i++) { \
        pixels[0] += block[0]; \
        pixels[1] += block[1]; \
        pixels[2] += block[2]; \
        pixels[3] += block[3]; \
        pixels[4] += block[4]; \
        pixels[5] += block[5]; \
        pixels[6] += block[6]; \
        pixels[7] += block[7]; \
        pixels += line_size; \
        block += 8; \
    } \
} \
 \
/* Add a 4x4 coefficient block onto the destination pixels (no clip). */ \
static void FUNCC(add_pixels4 ## suffix)(uint8_t *restrict _pixels, \
                                         DCTELEM *_block, \
                                         int line_size) \
{ \
    int i; \
    pixel *restrict pixels = (pixel *restrict)_pixels; \
    dctcoef *block = (dctcoef*)_block; \
    line_size /= sizeof(pixel); \
 \
    for(i=0;i<4;i++) { \
        pixels[0] += block[0]; \
        pixels[1] += block[1]; \
        pixels[2] += block[2]; \
        pixels[3] += block[3]; \
        pixels += line_size; \
        block += 4; \
    } \
} \
 \
/* Zero one 64-coefficient block. */ \
static void FUNCC(clear_block ## suffix)(DCTELEM *block) \
{ \
    memset(block, 0, sizeof(dctcoef)*64); \
} \
 \
/** \
 * memset(blocks, 0, sizeof(DCTELEM)*6*64) \
 */ \
static void FUNCC(clear_blocks ## suffix)(DCTELEM *blocks) \
{ \
    memset(blocks, 0, sizeof(dctcoef)*6*64); \
}
/* 16-bit coefficients always; 32-bit variants only at high bit depth. */
DCTELEM_FUNCS(DCTELEM, _16)
#if BIT_DEPTH > 8
DCTELEM_FUNCS(dctcoef, _32)
#endif
/*
 * PIXOP2(OPNAME, OP): expands to the whole family of pixel copy/average
 * motion-compensation primitives for one write policy. OP(dst, val) is
 * either a plain store ("put") or an average with the existing
 * destination ("avg"); OPNAME prefixes all generated names. Strides are
 * in bytes. The _xN/_yN/_xyN variants interpolate at half-pel positions;
 * "no_rnd" variants round down instead of to nearest.
 */
#define PIXOP2(OPNAME, OP) \
static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel2*)(block )), AV_RN2P(pixels ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
        OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
/* A straight copy needs no rounding, so no_rnd_pixels8 == pixels8. */\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
}\
\
/* _l2 variants: average two source blocks into dst, 4 pixels at a time. */\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                      int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1 ]);\
        b= AV_RN4P(&src2[i*src_stride2 ]);\
        OP(*((pixel4*)&dst[i*dst_stride ]), no_rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                               int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1 ]);\
        b= AV_RN4P(&src2[i*src_stride2 ]);\
        OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                               int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1 ]);\
        b= AV_RN4P(&src2[i*src_stride2 ]);\
        OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                               int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN2P(&src1[i*src_stride1 ]);\
        b= AV_RN2P(&src2[i*src_stride2 ]);\
        OP(*((pixel2*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                       int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
/* Half-pel horizontal (x2) / vertical (y2): average with the neighbour
 * one pixel to the right resp. one line below. */\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
/* _l4: average four sources with SWAR arithmetic on packed bytes
 * (low 2 bits and high 6 bits summed separately to avoid carries);
 * 0x02020202 biases the sum for round-to-nearest of (a+b+c+d+2)/4. */\
static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH */\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
/* Same 4-way average, but 0x01010101 bias = round down ("no_rnd"). */\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH*/\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
\
/* _xy2: half-pel in both directions (2x2 bilinear average). The scalar
 * 2-wide version carries row sums a/b between iterations. */\
static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)\
{\
    int i, a0, b0, a1, b1;\
    pixel *block = (pixel*)p_block;\
    const pixel *pixels = (const pixel*)p_pixels;\
    line_size >>= sizeof(pixel)-1;\
    a0= pixels[0];\
    b0= pixels[1] + 2;\
    a0 += b0;\
    b0 += pixels[2];\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        a1= pixels[0];\
        b1= pixels[1];\
        a1 += b1;\
        b1 += pixels[2];\
\
        block[0]= (a1+a0)>>2; /* FIXME non put */\
        block[1]= (b1+b0)>>2;\
\
        pixels+=line_size;\
        block +=line_size;\
\
        a0= pixels[0];\
        b0= pixels[1] + 2;\
        a0 += b0;\
        b0 += pixels[2];\
\
        block[0]= (a1+a0)>>2;\
        block[1]= (b1+b0)>>2;\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int i;\
    const uint32_t a= AV_RN32(pixels  );\
    const uint32_t b= AV_RN32(pixels+1);\
    uint32_t l0= (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x02020202UL;\
    uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
               + ((b&0xFCFCFCFCUL)>>2);\
    uint32_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint32_t a= AV_RN32(pixels  );\
        uint32_t b= AV_RN32(pixels+1);\
        l1= (a&0x03030303UL)\
          + (b&0x03030303UL);\
        h1= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN32(pixels  );\
        b= AV_RN32(pixels+1);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
/* 8-wide xy2: processed as two 4-wide halves (outer j loop). */\
static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x02020202UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x02020202UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x01010101UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x01010101UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
/* 16-wide entry points: run the 8-wide kernel on both halves. */\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16)    , FUNCC(OPNAME ## _pixels8)    , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16)    , FUNCC(OPNAME ## _pixels8)    , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))
/* Instantiate the copy ("put") and average ("avg") families. */
#define op_avg(a, b) a = rnd_avg_pixel4(a, b)
#define op_put(a, b) a = b
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
/* Plain copies involve no rounding, so the no_rnd names alias them. */
#define put_no_rnd_pixels8_c put_pixels8_c
#define put_no_rnd_pixels16_c put_pixels16_c
/* Convenience wrapper: two-source 16-wide average with one common stride. */
static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    FUNC(put_no_rnd_pixels16_l2)(dst, a, b, stride, stride, stride, h);
}
/* Convenience wrapper: two-source 8-wide average with one common stride. */
static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    FUNC(put_no_rnd_pixels8_l2)(dst, a, b, stride, stride, stride, h);
}
  645. #define H264_CHROMA_MC(OPNAME, OP)\
  646. static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
  647. pixel *dst = (pixel*)p_dst;\
  648. pixel *src = (pixel*)p_src;\
  649. const int A=(8-x)*(8-y);\
  650. const int B=( x)*(8-y);\
  651. const int C=(8-x)*( y);\
  652. const int D=( x)*( y);\
  653. int i;\
  654. stride >>= sizeof(pixel)-1;\
  655. \
  656. assert(x<8 && y<8 && x>=0 && y>=0);\
  657. \
  658. if(D){\
  659. for(i=0; i<h; i++){\
  660. OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
  661. OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
  662. dst+= stride;\
  663. src+= stride;\
  664. }\
  665. }else{\
  666. const int E= B+C;\
  667. const int step= C ? stride : 1;\
  668. for(i=0; i<h; i++){\
  669. OP(dst[0], (A*src[0] + E*src[step+0]));\
  670. OP(dst[1], (A*src[1] + E*src[step+1]));\
  671. dst+= stride;\
  672. src+= stride;\
  673. }\
  674. }\
  675. }\
  676. \
  677. static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
  678. pixel *dst = (pixel*)p_dst;\
  679. pixel *src = (pixel*)p_src;\
  680. const int A=(8-x)*(8-y);\
  681. const int B=( x)*(8-y);\
  682. const int C=(8-x)*( y);\
  683. const int D=( x)*( y);\
  684. int i;\
  685. stride >>= sizeof(pixel)-1;\
  686. \
  687. av_assert2(x<8 && y<8 && x>=0 && y>=0);\
  688. \
  689. if(D){\
  690. for(i=0; i<h; i++){\
  691. OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
  692. OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
  693. OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
  694. OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
  695. dst+= stride;\
  696. src+= stride;\
  697. }\
  698. }else{\
  699. const int E= B+C;\
  700. const int step= C ? stride : 1;\
  701. for(i=0; i<h; i++){\
  702. OP(dst[0], (A*src[0] + E*src[step+0]));\
  703. OP(dst[1], (A*src[1] + E*src[step+1]));\
  704. OP(dst[2], (A*src[2] + E*src[step+2]));\
  705. OP(dst[3], (A*src[3] + E*src[step+3]));\
  706. dst+= stride;\
  707. src+= stride;\
  708. }\
  709. }\
  710. }\
  711. \
  712. static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
  713. pixel *dst = (pixel*)p_dst;\
  714. pixel *src = (pixel*)p_src;\
  715. const int A=(8-x)*(8-y);\
  716. const int B=( x)*(8-y);\
  717. const int C=(8-x)*( y);\
  718. const int D=( x)*( y);\
  719. int i;\
  720. stride >>= sizeof(pixel)-1;\
  721. \
  722. av_assert2(x<8 && y<8 && x>=0 && y>=0);\
  723. \
  724. if(D){\
  725. for(i=0; i<h; i++){\
  726. OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
  727. OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
  728. OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
  729. OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
  730. OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
  731. OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
  732. OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
  733. OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
  734. dst+= stride;\
  735. src+= stride;\
  736. }\
  737. }else{\
  738. const int E= B+C;\
  739. const int step= C ? stride : 1;\
  740. for(i=0; i<h; i++){\
  741. OP(dst[0], (A*src[0] + E*src[step+0]));\
  742. OP(dst[1], (A*src[1] + E*src[step+1]));\
  743. OP(dst[2], (A*src[2] + E*src[step+2]));\
  744. OP(dst[3], (A*src[3] + E*src[step+3]));\
  745. OP(dst[4], (A*src[4] + E*src[step+4]));\
  746. OP(dst[5], (A*src[5] + E*src[step+5]));\
  747. OP(dst[6], (A*src[6] + E*src[step+6]));\
  748. OP(dst[7], (A*src[7] + E*src[step+7]));\
  749. dst+= stride;\
  750. src+= stride;\
  751. }\
  752. }\
  753. }
/* The weights A..D sum to 64, so (sum + 32) >> 6 rounds to nearest;
 * avg additionally rounds the average with the existing destination. */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
  760. #define H264_LOWPASS(OPNAME, OP, OP2) \
  761. static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
  762. const int h=2;\
  763. INIT_CLIP\
  764. int i;\
  765. pixel *dst = (pixel*)p_dst;\
  766. pixel *src = (pixel*)p_src;\
  767. dstStride >>= sizeof(pixel)-1;\
  768. srcStride >>= sizeof(pixel)-1;\
  769. for(i=0; i<h; i++)\
  770. {\
  771. OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
  772. OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
  773. dst+=dstStride;\
  774. src+=srcStride;\
  775. }\
  776. }\
  777. \
  778. static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
  779. const int w=2;\
  780. INIT_CLIP\
  781. int i;\
  782. pixel *dst = (pixel*)p_dst;\
  783. pixel *src = (pixel*)p_src;\
  784. dstStride >>= sizeof(pixel)-1;\
  785. srcStride >>= sizeof(pixel)-1;\
  786. for(i=0; i<w; i++)\
  787. {\
  788. const int srcB= src[-2*srcStride];\
  789. const int srcA= src[-1*srcStride];\
  790. const int src0= src[0 *srcStride];\
  791. const int src1= src[1 *srcStride];\
  792. const int src2= src[2 *srcStride];\
  793. const int src3= src[3 *srcStride];\
  794. const int src4= src[4 *srcStride];\
  795. OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
  796. OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
  797. dst++;\
  798. src++;\
  799. }\
  800. }\
  801. \
  /* 2D (horizontal then vertical) 6-tap lowpass for the 2x2 center half-pel
   * position. First pass filters horizontally into the caller-supplied tmp
   * buffer (h+5 rows are needed for the second pass's vertical taps); the
   * second pass filters tmp vertically and OP2 rescales/clips the result. */ \
  802. static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
  803. const int h=2;\
  804. const int w=2;\
  805. const int pad = (BIT_DEPTH == 10) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;/* bias so 10-bit intermediates fit pixeltmp; removed below */\
  806. INIT_CLIP\
  807. int i;\
  808. pixel *dst = (pixel*)p_dst;\
  809. pixel *src = (pixel*)p_src;\
  810. dstStride >>= sizeof(pixel)-1;/* byte stride -> pixel stride */\
  811. srcStride >>= sizeof(pixel)-1;\
  812. src -= 2*srcStride;/* start 2 rows above for the vertical taps */\
  813. for(i=0; i<h+5; i++)\
  814. {\
  815. tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
  816. tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
  817. tmp+=tmpStride;\
  818. src+=srcStride;\
  819. }\
  820. tmp -= tmpStride*(h+5-2);/* rewind to row 0 of the output window */\
  821. for(i=0; i<w; i++)\
  822. {\
  823. const int tmpB= tmp[-2*tmpStride] - pad;\
  824. const int tmpA= tmp[-1*tmpStride] - pad;\
  825. const int tmp0= tmp[0 *tmpStride] - pad;\
  826. const int tmp1= tmp[1 *tmpStride] - pad;\
  827. const int tmp2= tmp[2 *tmpStride] - pad;\
  828. const int tmp3= tmp[3 *tmpStride] - pad;\
  829. const int tmp4= tmp[4 *tmpStride] - pad;\
  830. OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
  831. OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
  832. dst++;\
  833. tmp++;\
  834. }\
  835. }\
  /* Horizontal 6-tap (1,-5,20,20,-5,1) qpel lowpass, 4 pixels wide x 4 rows;
   * reads src[-2..6] per row, OP rounds/clips (avg_ variant also averages). */ \
  836. static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
  837. const int h=4;\
  838. INIT_CLIP\
  839. int i;\
  840. pixel *dst = (pixel*)p_dst;\
  841. pixel *src = (pixel*)p_src;\
  842. dstStride >>= sizeof(pixel)-1;/* byte stride -> pixel stride */\
  843. srcStride >>= sizeof(pixel)-1;\
  844. for(i=0; i<h; i++)\
  845. {\
  846. OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
  847. OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
  848. OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
  849. OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
  850. dst+=dstStride;\
  851. src+=srcStride;\
  852. }\
  853. }\
  854. \
  /* Vertical 6-tap qpel lowpass over 4 columns, 4 output rows per column;
   * reads rows -2..6 relative to the current column start. */ \
  855. static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
  856. const int w=4;\
  857. INIT_CLIP\
  858. int i;\
  859. pixel *dst = (pixel*)p_dst;\
  860. pixel *src = (pixel*)p_src;\
  861. dstStride >>= sizeof(pixel)-1;/* byte stride -> pixel stride */\
  862. srcStride >>= sizeof(pixel)-1;\
  863. for(i=0; i<w; i++)\
  864. {\
  865. const int srcB= src[-2*srcStride];\
  866. const int srcA= src[-1*srcStride];\
  867. const int src0= src[0 *srcStride];\
  868. const int src1= src[1 *srcStride];\
  869. const int src2= src[2 *srcStride];\
  870. const int src3= src[3 *srcStride];\
  871. const int src4= src[4 *srcStride];\
  872. const int src5= src[5 *srcStride];\
  873. const int src6= src[6 *srcStride];\
  874. OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
  875. OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
  876. OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
  877. OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
  878. dst++;\
  879. src++;\
  880. }\
  881. }\
  882. \
  /* 2D 6-tap lowpass for the 4x4 center half-pel position: horizontal pass
   * into tmp (h+5 rows for the vertical taps), then vertical pass with OP2
   * doing the final (larger) rescale and clip. */ \
  883. static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
  884. const int h=4;\
  885. const int w=4;\
  886. const int pad = (BIT_DEPTH == 10) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;/* bias so 10-bit intermediates fit pixeltmp; removed below */\
  887. INIT_CLIP\
  888. int i;\
  889. pixel *dst = (pixel*)p_dst;\
  890. pixel *src = (pixel*)p_src;\
  891. dstStride >>= sizeof(pixel)-1;\
  892. srcStride >>= sizeof(pixel)-1;\
  893. src -= 2*srcStride;/* start 2 rows above for the vertical taps */\
  894. for(i=0; i<h+5; i++)\
  895. {\
  896. tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
  897. tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
  898. tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
  899. tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
  900. tmp+=tmpStride;\
  901. src+=srcStride;\
  902. }\
  903. tmp -= tmpStride*(h+5-2);/* rewind to row 0 of the output window */\
  904. for(i=0; i<w; i++)\
  905. {\
  906. const int tmpB= tmp[-2*tmpStride] - pad;\
  907. const int tmpA= tmp[-1*tmpStride] - pad;\
  908. const int tmp0= tmp[0 *tmpStride] - pad;\
  909. const int tmp1= tmp[1 *tmpStride] - pad;\
  910. const int tmp2= tmp[2 *tmpStride] - pad;\
  911. const int tmp3= tmp[3 *tmpStride] - pad;\
  912. const int tmp4= tmp[4 *tmpStride] - pad;\
  913. const int tmp5= tmp[5 *tmpStride] - pad;\
  914. const int tmp6= tmp[6 *tmpStride] - pad;\
  915. OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
  916. OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
  917. OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
  918. OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
  919. dst++;\
  920. tmp++;\
  921. }\
  922. }\
  923. \
  /* Horizontal 6-tap qpel lowpass, 8 pixels wide x 8 rows; fully unrolled
   * per row, reads src[-2..10]. */ \
  924. static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
  925. const int h=8;\
  926. INIT_CLIP\
  927. int i;\
  928. pixel *dst = (pixel*)p_dst;\
  929. pixel *src = (pixel*)p_src;\
  930. dstStride >>= sizeof(pixel)-1;/* byte stride -> pixel stride */\
  931. srcStride >>= sizeof(pixel)-1;\
  932. for(i=0; i<h; i++)\
  933. {\
  934. OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
  935. OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
  936. OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
  937. OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
  938. OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
  939. OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
  940. OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
  941. OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
  942. dst+=dstStride;\
  943. src+=srcStride;\
  944. }\
  945. }\
  946. \
  /* Vertical 6-tap qpel lowpass over 8 columns, 8 output rows per column;
   * reads rows -2..10 relative to the column start. */ \
  947. static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
  948. const int w=8;\
  949. INIT_CLIP\
  950. int i;\
  951. pixel *dst = (pixel*)p_dst;\
  952. pixel *src = (pixel*)p_src;\
  953. dstStride >>= sizeof(pixel)-1;/* byte stride -> pixel stride */\
  954. srcStride >>= sizeof(pixel)-1;\
  955. for(i=0; i<w; i++)\
  956. {\
  957. const int srcB= src[-2*srcStride];\
  958. const int srcA= src[-1*srcStride];\
  959. const int src0= src[0 *srcStride];\
  960. const int src1= src[1 *srcStride];\
  961. const int src2= src[2 *srcStride];\
  962. const int src3= src[3 *srcStride];\
  963. const int src4= src[4 *srcStride];\
  964. const int src5= src[5 *srcStride];\
  965. const int src6= src[6 *srcStride];\
  966. const int src7= src[7 *srcStride];\
  967. const int src8= src[8 *srcStride];\
  968. const int src9= src[9 *srcStride];\
  969. const int src10=src[10*srcStride];\
  970. OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
  971. OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
  972. OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
  973. OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
  974. OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
  975. OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
  976. OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
  977. OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
  978. dst++;\
  979. src++;\
  980. }\
  981. }\
  982. \
  /* 2D 6-tap lowpass for the 8x8 center half-pel position: horizontal pass
   * into tmp (h+5 rows needed by the vertical taps), then vertical pass on
   * tmp with OP2 performing the final rescale and clip. */ \
  983. static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
  984. const int h=8;\
  985. const int w=8;\
  986. const int pad = (BIT_DEPTH == 10) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;/* bias so 10-bit intermediates fit pixeltmp; removed below */\
  987. INIT_CLIP\
  988. int i;\
  989. pixel *dst = (pixel*)p_dst;\
  990. pixel *src = (pixel*)p_src;\
  991. dstStride >>= sizeof(pixel)-1;\
  992. srcStride >>= sizeof(pixel)-1;\
  993. src -= 2*srcStride;/* start 2 rows above for the vertical taps */\
  994. for(i=0; i<h+5; i++)\
  995. {\
  996. tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
  997. tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
  998. tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
  999. tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
  1000. tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
  1001. tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
  1002. tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
  1003. tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
  1004. tmp+=tmpStride;\
  1005. src+=srcStride;\
  1006. }\
  1007. tmp -= tmpStride*(h+5-2);/* rewind to row 0 of the output window */\
  1008. for(i=0; i<w; i++)\
  1009. {\
  1010. const int tmpB= tmp[-2*tmpStride] - pad;\
  1011. const int tmpA= tmp[-1*tmpStride] - pad;\
  1012. const int tmp0= tmp[0 *tmpStride] - pad;\
  1013. const int tmp1= tmp[1 *tmpStride] - pad;\
  1014. const int tmp2= tmp[2 *tmpStride] - pad;\
  1015. const int tmp3= tmp[3 *tmpStride] - pad;\
  1016. const int tmp4= tmp[4 *tmpStride] - pad;\
  1017. const int tmp5= tmp[5 *tmpStride] - pad;\
  1018. const int tmp6= tmp[6 *tmpStride] - pad;\
  1019. const int tmp7= tmp[7 *tmpStride] - pad;\
  1020. const int tmp8= tmp[8 *tmpStride] - pad;\
  1021. const int tmp9= tmp[9 *tmpStride] - pad;\
  1022. const int tmp10=tmp[10*tmpStride] - pad;\
  1023. OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
  1024. OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
  1025. OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
  1026. OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
  1027. OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
  1028. OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
  1029. OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
  1030. OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
  1031. dst++;\
  1032. tmp++;\
  1033. }\
  1034. }\
  1035. \
  /* 16x16 vertical lowpass assembled from four 8x8 quadrant calls
   * (top-left, top-right, then advance 8 rows for the bottom pair). */ \
  1036. static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1037. FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
  1038. FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
  1039. src += 8*srcStride;\
  1040. dst += 8*dstStride;\
  1041. FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
  1042. FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
  1043. }\
  1044. \
  /* 16x16 horizontal lowpass assembled from four 8x8 quadrant calls. */ \
  1045. static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1046. FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
  1047. FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
  1048. src += 8*srcStride;\
  1049. dst += 8*dstStride;\
  1050. FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
  1051. FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
  1052. }\
  1053. \
  /* 16x16 2D lowpass from four 8x8 quadrant calls; tmp is shared across
   * quadrants (offset by 8 elements for the right half). */ \
  1054. static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, pixeltmp *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  1055. FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
  1056. FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
  1057. src += 8*srcStride;\
  1058. dst += 8*dstStride;\
  1059. FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
  1060. FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
  1061. }\
  /* H264_MC(OPNAME, SIZE): generates the 16 quarter-pel motion-compensation
   * entry points mcXY for a SIZE x SIZE block, where X and Y are the
   * horizontal/vertical quarter-pel offsets (0..3). Full- and half-pel
   * positions call the lowpass filters directly; quarter positions are built
   * by averaging two neighbouring half/full-pel planes via pixels##SIZE##_l2.
   * "full" buffers hold SIZE+5 rows copied from src (2 above, 3 below) so the
   * vertical 6-tap filter has its margins; full_mid points at the block row. */
  1062. #define H264_MC(OPNAME, SIZE) \
  /* mc00: integer position, plain pixel copy/average */ \
  1063. static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
  1064. FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
  1065. }\
  1066. \
  /* mc10/mc30: quarter-pel horizontal = average of full-pel column and h-lowpass */ \
  1067. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
  1068. uint8_t half[SIZE*SIZE*sizeof(pixel)];\
  1069. FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
  1070. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
  1071. }\
  1072. \
  1073. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
  1074. FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
  1075. }\
  1076. \
  1077. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
  1078. uint8_t half[SIZE*SIZE*sizeof(pixel)];\
  1079. FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
  1080. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
  1081. }\
  1082. \
  /* mc01/mc02/mc03: vertical positions; src is first copied into "full" with margins */ \
  1083. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
  1084. uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
  1085. uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
  1086. uint8_t half[SIZE*SIZE*sizeof(pixel)];\
  1087. FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
  1088. FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
  1089. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1090. }\
  1091. \
  1092. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
  1093. uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
  1094. uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
  1095. FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
  1096. FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
  1097. }\
  1098. \
  1099. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
  1100. uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
  1101. uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
  1102. uint8_t half[SIZE*SIZE*sizeof(pixel)];\
  1103. FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
  1104. FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
  1105. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1106. }\
  1107. \
  /* mc11/mc31/mc13/mc33: diagonal quarters = average of an h-lowpass and a v-lowpass plane */ \
  1108. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
  1109. uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
  1110. uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
  1111. uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
  1112. uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
  1113. FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
  1114. FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
  1115. FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
  1116. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1117. }\
  1118. \
  1119. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
  1120. uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
  1121. uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
  1122. uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
  1123. uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
  1124. FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
  1125. FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
  1126. FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
  1127. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1128. }\
  1129. \
  1130. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
  1131. uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
  1132. uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
  1133. uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
  1134. uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
  1135. FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
  1136. FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
  1137. FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
  1138. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1139. }\
  1140. \
  1141. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
  1142. uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
  1143. uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
  1144. uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
  1145. uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
  1146. FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
  1147. FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
  1148. FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
  1149. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1150. }\
  1151. \
  /* mc22: exact center, single 2-D hv lowpass */ \
  1152. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
  1153. pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
  1154. FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
  1155. }\
  1156. \
  /* mc21/mc23/mc12/mc32: average of a 1-D half-pel plane and the 2-D hv plane */ \
  1157. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
  1158. pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
  1159. uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
  1160. uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
  1161. FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
  1162. FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
  1163. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1164. }\
  1165. \
  1166. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
  1167. pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
  1168. uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
  1169. uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
  1170. FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
  1171. FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
  1172. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1173. }\
  1174. \
  1175. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
  1176. uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
  1177. uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
  1178. pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
  1179. uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
  1180. uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
  1181. FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
  1182. FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
  1183. FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
  1184. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1185. }\
  1186. \
  1187. static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
  1188. uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
  1189. uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
  1190. pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
  1191. uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
  1192. uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
  1193. FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
  1194. FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
  1195. FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
  1196. FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
  1197. }\
  /* Rounding/clipping ops plugged into the templates:
   *   OP  (single 6-tap pass): (b+16)>>5 undoes the filter gain of 32;
   *   OP2 (two cascaded passes): (b+512)>>10 undoes the gain of 32*32.
   * avg variants additionally round-average with the existing dst pixel. */
  1198. #define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
  1199. //#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
  1200. #define op_put(a, b) a = CLIP(((b) + 16)>>5)
  1201. #define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
  1202. #define op2_put(a, b) a = CLIP(((b) + 512)>>10)
  /* Instantiate the filter kernels and the mcXY entry points; note there is
   * no H264_MC(avg_, 2) — the avg_ 2x2 kernels stay unused (hence av_unused). */
  1203. H264_LOWPASS(put_ , op_put, op2_put)
  1204. H264_LOWPASS(avg_ , op_avg, op2_avg)
  1205. H264_MC(put_, 2)
  1206. H264_MC(put_, 4)
  1207. H264_MC(put_, 8)
  1208. H264_MC(put_, 16)
  1209. H264_MC(avg_, 4)
  1210. H264_MC(avg_, 8)
  1211. H264_MC(avg_, 16)
  1212. #undef op_avg
  1213. #undef op_put
  1214. #undef op2_avg
  1215. #undef op2_put
  /* Per-bit-depth aliases: the mc00 (integer-position) entry points are just
   * pixel copies/averages, so map them to the shared ff_*_pixelsNxN helpers
   * instead of generating duplicates. */
  1216. #if BIT_DEPTH == 8
  1217. # define put_h264_qpel8_mc00_8_c ff_put_pixels8x8_8_c
  1218. # define avg_h264_qpel8_mc00_8_c ff_avg_pixels8x8_8_c
  1219. # define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
  1220. # define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
  1221. #elif BIT_DEPTH == 9
  1222. # define put_h264_qpel8_mc00_9_c ff_put_pixels8x8_9_c
  1223. # define avg_h264_qpel8_mc00_9_c ff_avg_pixels8x8_9_c
  1224. # define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
  1225. # define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
  1226. #elif BIT_DEPTH == 10
  1227. # define put_h264_qpel8_mc00_10_c ff_put_pixels8x8_10_c
  1228. # define avg_h264_qpel8_mc00_10_c ff_avg_pixels8x8_10_c
  1229. # define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
  1230. # define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
  1231. #elif BIT_DEPTH == 12
  1232. # define put_h264_qpel8_mc00_12_c ff_put_pixels8x8_12_c
  1233. # define avg_h264_qpel8_mc00_12_c ff_avg_pixels8x8_12_c
  1234. # define put_h264_qpel16_mc00_12_c ff_put_pixels16x16_12_c
  1235. # define avg_h264_qpel16_mc00_12_c ff_avg_pixels16x16_12_c
  1236. #elif BIT_DEPTH == 14
  1237. # define put_h264_qpel8_mc00_14_c ff_put_pixels8x8_14_c
  1238. # define avg_h264_qpel8_mc00_14_c ff_avg_pixels8x8_14_c
  1239. # define put_h264_qpel16_mc00_14_c ff_put_pixels16x16_14_c
  1240. # define avg_h264_qpel16_mc00_14_c ff_avg_pixels16x16_14_c
  1241. #endif
  /* Public 8x8 copy: thin wrapper fixing the height of put_pixels8 at 8. */
  1242. void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
  1243. FUNCC(put_pixels8)(dst, src, stride, 8);
  1244. }
  /* Public 8x8 average: thin wrapper fixing the height of avg_pixels8 at 8. */
  1245. void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
  1246. FUNCC(avg_pixels8)(dst, src, stride, 8);
  1247. }
  /* Public 16x16 copy: thin wrapper fixing the height of put_pixels16 at 16. */
  1248. void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
  1249. FUNCC(put_pixels16)(dst, src, stride, 16);
  1250. }
  /* Public 16x16 average: thin wrapper fixing the height of avg_pixels16 at 16. */
  1251. void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
  1252. FUNCC(avg_pixels16)(dst, src, stride, 16);
  1253. }