  1. /*
  2. * DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of FFmpeg.
  9. *
  10. * FFmpeg is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * FFmpeg is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with FFmpeg; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * DSP utils
  27. */
  28. #include "bit_depth_template.c"
  29. static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  30. {
  31. int i;
  32. for(i=0; i<h; i++)
  33. {
  34. AV_WN2P(dst , AV_RN2P(src ));
  35. dst+=dstStride;
  36. src+=srcStride;
  37. }
  38. }
  39. static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  40. {
  41. int i;
  42. for(i=0; i<h; i++)
  43. {
  44. AV_WN4P(dst , AV_RN4P(src ));
  45. dst+=dstStride;
  46. src+=srcStride;
  47. }
  48. }
  49. static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  50. {
  51. int i;
  52. for(i=0; i<h; i++)
  53. {
  54. AV_WN4P(dst , AV_RN4P(src ));
  55. AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
  56. dst+=dstStride;
  57. src+=srcStride;
  58. }
  59. }
  60. static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  61. {
  62. int i;
  63. for(i=0; i<h; i++)
  64. {
  65. AV_WN4P(dst , AV_RN4P(src ));
  66. AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
  67. AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
  68. AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
  69. dst+=dstStride;
  70. src+=srcStride;
  71. }
  72. }
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
/**
 * Replicate the picture borders outward so that motion compensation may
 * read slightly outside the picture.
 *
 * @param p_buf  top-left pixel of the picture (edges are written BEFORE
 *               and AFTER this area, so the allocation must include them)
 * @param p_wrap line stride in bytes
 * @param width  picture width in pixels
 * @param height picture height in pixels
 * @param w      horizontal edge width in pixels
 * @param h      vertical edge height in lines
 * @param sides  bitmask of EDGE_TOP / EDGE_BOTTOM selecting which
 *               horizontal edges to draw
 */
static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int h, int sides)
{
    pixel *buf = (pixel*)p_buf;
    int wrap = p_wrap / sizeof(pixel);  /* byte stride -> pixel stride */
    pixel *ptr, *last_line;
    int i;

    /* left and right: replicate the first/last pixel of every line */
    ptr = buf;
    for(i=0;i<height;i++) {
#if BIT_DEPTH > 8
        /* pixels are wider than one byte, so memset() cannot be used */
        int j;
        for (j = 0; j < w; j++) {
            ptr[j-w] = ptr[0];
            ptr[j+width] = ptr[width-1];
        }
#else
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
#endif
        ptr += wrap;
    }

    /* top and bottom + corners: copy whole extended lines (width + both
     * side edges), which fills the corners in the same pass */
    buf -= w;
    last_line = buf + (height - 1) * wrap;
    if (sides & EDGE_TOP)
        for(i = 0; i < h; i++)
            memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
    if (sides & EDGE_BOTTOM)
        for (i = 0; i < h; i++)
            memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
}
/**
 * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
 *
 * Used when a motion-compensation block extends past the picture edges:
 * the in-picture part is copied and the missing part is synthesized by
 * replicating the nearest edge samples.
 *
 * @param buf destination buffer
 * @param src source buffer
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
                               int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    if(!w || !h)
        return;

    /* If the block lies entirely outside the picture vertically, move src
     * to the closest position that still overlaps by one line. */
    if(src_y>= h){
        src-= src_y*linesize;
        src+= (h-1)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src-= src_y*linesize;
        src+= (1-block_h)*linesize;
        src_y=1-block_h;
    }
    /* Same clamping horizontally (offsets scaled by the pixel size). */
    if(src_x>= w){
        src+= (w-1-src_x)*sizeof(pixel);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x)*sizeof(pixel);
        src_x=1-block_w;
    }

    /* [start_x,end_x) x [start_y,end_y) is the part of the block covered
     * by real picture data; everything else is replicated. */
    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);
    av_assert2(start_y < end_y && block_h);
    av_assert2(start_x < end_x && block_w);

    w = end_x - start_x;  /* w now = width of the copied (real) columns */
    src += start_y*linesize + start_x*sizeof(pixel);
    buf += start_x*sizeof(pixel);

    //top: replicate the first available source line upward
    for(y=0; y<start_y; y++){
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }

    // copy existing part
    for(; y<end_y; y++){
        memcpy(buf, src, w*sizeof(pixel));
        src += linesize;
        buf += linesize;
    }

    //bottom: replicate the last copied source line downward
    src -= linesize;
    for(; y<block_h; y++){
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }

    /* Second pass: replicate the left/right columns on every line. */
    buf -= block_h * linesize + start_x*sizeof(pixel);
    while (block_h--){
        pixel *bufp = (pixel*)buf;
        //left
        for(x=0; x<start_x; x++){
            bufp[x] = bufp[start_x];
        }
        //right
        for(x=end_x; x<block_w; x++){
            bufp[x] = bufp[end_x - 1];
        }
        buf += linesize;
    }
}
/**
 * Generate get_pixels / add_pixels8 / add_pixels4 / clear_block /
 * clear_blocks for one DCT coefficient type.  Instantiated below with a
 * name suffix per coefficient width.
 */
#define DCTELEM_FUNCS(dctcoef, suffix)                                \
static void FUNCC(get_pixels ## suffix)(DCTELEM *av_restrict _block,  \
                                        const uint8_t *_pixels,       \
                                        int line_size)                \
{                                                                     \
    const pixel *pixels = (const pixel *) _pixels;                    \
    dctcoef *av_restrict block = (dctcoef *) _block;                  \
    int i;                                                            \
                                                                      \
    /* read the pixels */                                             \
    for(i=0;i<8;i++) {                                                \
        block[0] = pixels[0];                                         \
        block[1] = pixels[1];                                         \
        block[2] = pixels[2];                                         \
        block[3] = pixels[3];                                         \
        block[4] = pixels[4];                                         \
        block[5] = pixels[5];                                         \
        block[6] = pixels[6];                                         \
        block[7] = pixels[7];                                         \
        pixels += line_size / sizeof(pixel);                          \
        block += 8;                                                   \
    }                                                                 \
}                                                                     \
                                                                      \
/* add an 8x8 block of coefficients to the pixels (no clipping here) */\
static void FUNCC(add_pixels8 ## suffix)(uint8_t *av_restrict _pixels,\
                                         DCTELEM *_block,             \
                                         int line_size)               \
{                                                                     \
    int i;                                                            \
    pixel *av_restrict pixels = (pixel *av_restrict)_pixels;          \
    dctcoef *block = (dctcoef*)_block;                                \
    line_size /= sizeof(pixel);                                       \
                                                                      \
    for(i=0;i<8;i++) {                                                \
        pixels[0] += block[0];                                        \
        pixels[1] += block[1];                                        \
        pixels[2] += block[2];                                        \
        pixels[3] += block[3];                                        \
        pixels[4] += block[4];                                        \
        pixels[5] += block[5];                                        \
        pixels[6] += block[6];                                        \
        pixels[7] += block[7];                                        \
        pixels += line_size;                                          \
        block += 8;                                                   \
    }                                                                 \
}                                                                     \
                                                                      \
/* 4x4 variant of add_pixels8 */                                      \
static void FUNCC(add_pixels4 ## suffix)(uint8_t *av_restrict _pixels,\
                                         DCTELEM *_block,             \
                                         int line_size)               \
{                                                                     \
    int i;                                                            \
    pixel *av_restrict pixels = (pixel *av_restrict)_pixels;          \
    dctcoef *block = (dctcoef*)_block;                                \
    line_size /= sizeof(pixel);                                       \
                                                                      \
    for(i=0;i<4;i++) {                                                \
        pixels[0] += block[0];                                        \
        pixels[1] += block[1];                                        \
        pixels[2] += block[2];                                        \
        pixels[3] += block[3];                                        \
        pixels += line_size;                                          \
        block += 4;                                                   \
    }                                                                 \
}                                                                     \
                                                                      \
static void FUNCC(clear_block ## suffix)(DCTELEM *block)              \
{                                                                     \
    memset(block, 0, sizeof(dctcoef)*64);                             \
}                                                                     \
                                                                      \
/** \
 * memset(blocks, 0, sizeof(DCTELEM)*6*64) \
 */ \
static void FUNCC(clear_blocks ## suffix)(DCTELEM *blocks)            \
{                                                                     \
    memset(blocks, 0, sizeof(dctcoef)*6*64);                          \
}

/* 16-bit coefficients always; 32-bit additionally for high bit depths */
DCTELEM_FUNCS(DCTELEM, _16)
#if BIT_DEPTH > 8
DCTELEM_FUNCS(dctcoef, _32)
#endif
/**
 * Generate the whole family of whole/half-pel pixel copy and averaging
 * primitives for one output operation.  OPNAME is the function-name
 * prefix; OP(dst, val) either stores val (put) or averages it into dst
 * (avg).  _x2/_y2/_xy2 are the horizontal / vertical / diagonal half-pel
 * interpolations; _l2/_l4 average 2 or 4 source rows.
 */
#define PIXOP2(OPNAME, OP) \
static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel2*)(block )), AV_RN2P(pixels ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
        OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
/* whole-pel copy has no rounding, so no_rnd == normal */\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
}\
\
/* average two 8-wide sources per line, rounding toward zero */\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                      int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1 ]);\
        b= AV_RN4P(&src2[i*src_stride2 ]);\
        OP(*((pixel4*)&dst[i*dst_stride ]), no_rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
    }\
}\
\
/* average two 8-wide sources per line, rounding away from zero */\
static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                               int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1 ]);\
        b= AV_RN4P(&src2[i*src_stride2 ]);\
        OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                               int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1 ]);\
        b= AV_RN4P(&src2[i*src_stride2 ]);\
        OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                               int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN2P(&src1[i*src_stride1 ]);\
        b= AV_RN2P(&src2[i*src_stride2 ]);\
        OP(*((pixel2*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
    }\
}\
\
/* 16-wide variants: two 8-wide calls side by side */\
static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                       int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
/* half-pel x/y interpolation == averaging with a shifted copy of src */\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
/* average four sources per line using split low/high 2-bit arithmetic */\
static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                                               int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH */\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
/* same as _pixels8_l4 but with the smaller (truncating) rounding bias */\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                                                      int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH*/\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                                                int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                                                       int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
\
/* diagonal (x+y) half-pel: 4-tap average of the 2x2 neighbourhood,
 * carrying running row sums (a*, b*) between iterations */\
static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)\
{\
    int i, a0, b0, a1, b1;\
    pixel *block = (pixel*)p_block;\
    const pixel *pixels = (const pixel*)p_pixels;\
    line_size >>= sizeof(pixel)-1;\
    a0= pixels[0];\
    b0= pixels[1] + 2;\
    a0 += b0;\
    b0 += pixels[2];\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        a1= pixels[0];\
        b1= pixels[1];\
        a1 += b1;\
        b1 += pixels[2];\
\
        block[0]= (a1+a0)>>2; /* FIXME non put */\
        block[1]= (b1+b0)>>2;\
\
        pixels+=line_size;\
        block +=line_size;\
\
        a0= pixels[0];\
        b0= pixels[1] + 2;\
        a0 += b0;\
        b0 += pixels[2];\
\
        block[0]= (a1+a0)>>2;\
        block[1]= (b1+b0)>>2;\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int i;\
    const uint32_t a= AV_RN32(pixels );\
    const uint32_t b= AV_RN32(pixels+1);\
    uint32_t l0= (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x02020202UL;\
    uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
               + ((b&0xFCFCFCFCUL)>>2);\
    uint32_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint32_t a= AV_RN32(pixels );\
        uint32_t b= AV_RN32(pixels+1);\
        l1= (a&0x03030303UL)\
          + (b&0x03030303UL);\
        h1= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN32(pixels );\
        b= AV_RN32(pixels+1);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    /* two 4-wide passes cover the 8-wide block */\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x02020202UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels );\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels );\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x02020202UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x01010101UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels );\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels );\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x01010101UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
/* build the 16-wide entry points from pairs of 8-wide calls */\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\

/* Instantiate the pixel-op family for "avg" (read-modify-write average)
 * and "put" (plain store). */
#define op_avg(a, b) a = rnd_avg_pixel4(a, b)
#define op_put(a, b) a = b
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put

/* Whole-pel copies involve no rounding, so no_rnd put == put. */
#define put_no_rnd_pixels8_c put_pixels8_c
#define put_no_rnd_pixels16_c put_pixels16_c
/* Equal-stride convenience wrapper around the generic 3-stride variant. */
static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    FUNC(put_no_rnd_pixels16_l2)(dst, a, b, stride, stride, stride, h);
}
/* Equal-stride convenience wrapper around the generic 3-stride variant. */
static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    FUNC(put_no_rnd_pixels8_l2)(dst, a, b, stride, stride, stride, h);
}
/**
 * Generate 2/4/8-wide H.264 chroma motion-compensation functions.
 * Bilinear interpolation with 1/8-pel weights:
 *   A=(8-x)(8-y), B=x(8-y), C=(8-x)y, D=xy  (A+B+C+D == 64).
 * When D == 0 the filter degenerates to a 2-tap along one axis (step
 * selects vertical vs horizontal).  OP applies the >>6 normalization.
 */
#define H264_CHROMA_MC(OPNAME, OP)\
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    const int A=(8-x)*(8-y);\
    const int B=(   x)*(8-y);\
    const int C=(8-x)*(   y);\
    const int D=(   x)*(   y);\
    int i;\
    /* byte stride -> pixel stride (pixel is 1 or 2 bytes) */\
    stride >>= sizeof(pixel)-1;\
\
    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    const int A=(8-x)*(8-y);\
    const int B=(   x)*(8-y);\
    const int C=(8-x)*(   y);\
    const int D=(   x)*(   y);\
    int i;\
    stride >>= sizeof(pixel)-1;\
\
    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    const int A=(8-x)*(8-y);\
    const int B=(   x)*(8-y);\
    const int C=(8-x)*(   y);\
    const int D=(   x)*(   y);\
    int i;\
    stride >>= sizeof(pixel)-1;\
\
    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            OP(dst[4], (A*src[4] + E*src[step+4]));\
            OP(dst[5], (A*src[5] + E*src[step+5]));\
            OP(dst[6], (A*src[6] + E*src[step+6]));\
            OP(dst[7], (A*src[7] + E*src[step+7]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}

/* put: normalize (>>6) with +32 rounding; avg: additionally average with
 * the existing destination sample. */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_       , op_put)
H264_CHROMA_MC(avg_       , op_avg)
#undef op_avg
#undef op_put
  764. #define H264_LOWPASS(OPNAME, OP, OP2) \
  765. static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
  766. const int h=2;\
  767. INIT_CLIP\
  768. int i;\
  769. pixel *dst = (pixel*)p_dst;\
  770. pixel *src = (pixel*)p_src;\
  771. dstStride >>= sizeof(pixel)-1;\
  772. srcStride >>= sizeof(pixel)-1;\
  773. for(i=0; i<h; i++)\
  774. {\
  775. OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
  776. OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
  777. dst+=dstStride;\
  778. src+=srcStride;\
  779. }\
  780. }\
  781. \
  782. static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
  783. const int w=2;\
  784. INIT_CLIP\
  785. int i;\
  786. pixel *dst = (pixel*)p_dst;\
  787. pixel *src = (pixel*)p_src;\
  788. dstStride >>= sizeof(pixel)-1;\
  789. srcStride >>= sizeof(pixel)-1;\
  790. for(i=0; i<w; i++)\
  791. {\
  792. const int srcB= src[-2*srcStride];\
  793. const int srcA= src[-1*srcStride];\
  794. const int src0= src[0 *srcStride];\
  795. const int src1= src[1 *srcStride];\
  796. const int src2= src[2 *srcStride];\
  797. const int src3= src[3 *srcStride];\
  798. const int src4= src[4 *srcStride];\
  799. OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
  800. OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
  801. dst++;\
  802. src++;\
  803. }\
  804. }\
  805. \
static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
    /* 2x2 centre (half-pel in both axes): horizontal 6-tap pass into the   \
     * intermediate 'tmp' buffer, then a vertical 6-tap pass over tmp with  \
     * OP2() doing the combined (b+512)>>10 rounding.                       \
     * NOTE(review): 'pad' biases the 10-bit intermediates downward,        \
     * presumably so they fit pixeltmp's range for BIT_DEPTH 10 — confirm   \
     * pixeltmp's width against bit_depth_template.c.                       */\
    const int h=2;\
    const int w=2;\
    const int pad = (BIT_DEPTH == 10) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1; /* byte stride -> pixel stride */\
    srcStride >>= sizeof(pixel)-1;\
    src -= 2*srcStride; /* vertical pass needs 2 rows above / 3 below: filter h+5 rows */\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2); /* rewind so tmp[-2*tmpStride] is the first filtered row */\
    for(i=0; i<w; i++)\
    {\
        /* remove the bias before the second (vertical) pass */\
        const int tmpB= tmp[-2*tmpStride] - pad;\
        const int tmpA= tmp[-1*tmpStride] - pad;\
        const int tmp0= tmp[0 *tmpStride] - pad;\
        const int tmp1= tmp[1 *tmpStride] - pad;\
        const int tmp2= tmp[2 *tmpStride] - pad;\
        const int tmp3= tmp[3 *tmpStride] - pad;\
        const int tmp4= tmp[4 *tmpStride] - pad;\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        dst++;\
        tmp++;\
    }\
}\
static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    /* 4-wide horizontal half-pel interpolation: H.264 6-tap filter         \
     * (1,-5,20,20,-5,1) across each row; OP() rounds with (b+16)>>5.       */\
    const int h=4;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1; /* byte stride -> pixel stride */\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
  858. \
static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    /* 4-wide vertical half-pel interpolation, column by column (see the    \
     * 2-wide variant above for the filter layout).                         */\
    const int w=4;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1; /* byte stride -> pixel stride */\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        dst++;\
        src++;\
    }\
}\
  886. \
static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
    /* 4x4 centre position: horizontal 6-tap pass into 'tmp', then vertical \
     * 6-tap pass with OP2() ((b+512)>>10 rounding).  See the 2x2 variant   \
     * for the 'pad' bias note (10-bit intermediates).                      */\
    const int h=4;\
    const int w=4;\
    const int pad = (BIT_DEPTH == 10) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1; /* byte stride -> pixel stride */\
    srcStride >>= sizeof(pixel)-1;\
    src -= 2*srcStride; /* filter h+5 rows for the vertical pass's overhang */\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2); /* rewind to row 2 of the intermediate block */\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride] - pad;\
        const int tmpA= tmp[-1*tmpStride] - pad;\
        const int tmp0= tmp[0 *tmpStride] - pad;\
        const int tmp1= tmp[1 *tmpStride] - pad;\
        const int tmp2= tmp[2 *tmpStride] - pad;\
        const int tmp3= tmp[3 *tmpStride] - pad;\
        const int tmp4= tmp[4 *tmpStride] - pad;\
        const int tmp5= tmp[5 *tmpStride] - pad;\
        const int tmp6= tmp[6 *tmpStride] - pad;\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        dst++;\
        tmp++;\
    }\
}\
  927. \
static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    /* 8-wide horizontal half-pel interpolation (same 6-tap filter and      \
     * rounding as the 4-wide variant, unrolled to 8 outputs per row).      */\
    const int h=8;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1; /* byte stride -> pixel stride */\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
        OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
        OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
        OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
        OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
  950. \
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    /* 8-wide vertical half-pel interpolation, column by column (same       \
     * 6-tap filter and rounding as the 4-wide variant).                    */\
    const int w=8;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1; /* byte stride -> pixel stride */\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        const int src7= src[7 *srcStride];\
        const int src8= src[8 *srcStride];\
        const int src9= src[9 *srcStride];\
        const int src10=src[10*srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
        OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
        OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
        OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
        dst++;\
        src++;\
    }\
}\
  986. \
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
    /* 8x8 centre position: horizontal 6-tap pass into 'tmp', then vertical \
     * 6-tap pass with OP2() ((b+512)>>10 rounding).  See the 2x2 variant   \
     * for the 'pad' bias note (10-bit intermediates).                      */\
    const int h=8;\
    const int w=8;\
    const int pad = (BIT_DEPTH == 10) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1; /* byte stride -> pixel stride */\
    srcStride >>= sizeof(pixel)-1;\
    src -= 2*srcStride; /* filter h+5 rows for the vertical pass's overhang */\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
        tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
        tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
        tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
        tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2); /* rewind to row 2 of the intermediate block */\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride] - pad;\
        const int tmpA= tmp[-1*tmpStride] - pad;\
        const int tmp0= tmp[0 *tmpStride] - pad;\
        const int tmp1= tmp[1 *tmpStride] - pad;\
        const int tmp2= tmp[2 *tmpStride] - pad;\
        const int tmp3= tmp[3 *tmpStride] - pad;\
        const int tmp4= tmp[4 *tmpStride] - pad;\
        const int tmp5= tmp[5 *tmpStride] - pad;\
        const int tmp6= tmp[6 *tmpStride] - pad;\
        const int tmp7= tmp[7 *tmpStride] - pad;\
        const int tmp8= tmp[8 *tmpStride] - pad;\
        const int tmp9= tmp[9 *tmpStride] - pad;\
        const int tmp10=tmp[10*tmpStride] - pad;\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
        OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
        OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
        OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
        dst++;\
        tmp++;\
    }\
}\
  1039. \
static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    /* 16x16 vertical half-pel, composed from four 8x8 quadrants.  Offsets  \
     * are in bytes here (strides/pointers are still byte based).           */\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
    src += 8*srcStride; /* move to the bottom half */\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
  1048. \
static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    /* 16x16 horizontal half-pel, composed from four 8x8 quadrants.         */\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
    src += 8*srcStride; /* move to the bottom half */\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
  1057. \
static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, pixeltmp *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    /* 16x16 centre half-pel, composed from four 8x8 quadrants; tmp is in   \
     * pixeltmp units, hence the unscaled +8 for the right-hand quadrants.  */\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
    src += 8*srcStride; /* move to the bottom half */\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
}\
#define H264_MC(OPNAME, SIZE) \
/* Quarter-pel motion compensation; _mcXY is the sub-pel phase, X =        \
 * horizontal, Y = vertical, in quarter-pel units.  Half-pel samples come  \
 * from the 6-tap *_lowpass filters above; odd quarter-pel phases are the  \
 * rounded average (pixels_l2) of the two nearest full/half-pel planes.    \
 * 'full' is a stack copy of the source including the 2 rows above and 3   \
 * below that the vertical filter reads; full_mid points at the original   \
 * top-left inside that copy.                                              */\
/* (0,0): integer position — plain block copy */\
static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
    FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
}\
\
/* (1,0): average of full-pel source and horizontal half-pel */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
/* (2,0): horizontal half-pel, straight filter output */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
}\
\
/* (3,0): average of half-pel and the next full-pel column (src+1 pixel) */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
/* (0,1): average of full-pel source and vertical half-pel */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (0,2): vertical half-pel, straight filter output */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
}\
\
/* (0,3): average of vertical half-pel and the next full-pel row */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1111. \
/* Diagonal quarter-pel positions: average of a horizontal half-pel plane  \
 * and a vertical half-pel plane; the +stride / +sizeof(pixel) offsets     \
 * select the filter input nearest to the target sub-pel position.         */\
/* (1,1) */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (3,1): vertical filter runs one full-pel column to the right */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (1,3): horizontal filter runs one full-pel row down */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (3,3): both filters shifted toward the bottom-right full-pel */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1155. \
/* Positions involving the 2D (hv) centre half-pel: 'tmp' holds the        \
 * first-pass (horizontal) intermediates for the hv filter.                */\
/* (2,2): centre half-pel, straight hv filter output */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
    pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
}\
\
/* (2,1): average of horizontal half-pel and centre half-pel */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
    pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (2,3): as (2,1) but with the horizontal filter one row down */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
    pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (1,2): average of vertical half-pel and centre half-pel */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (3,2): as (1,2) but with the vertical filter one column right */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
/* Rounding/store primitives plugged into H264_LOWPASS:
 *   op_put / op_avg   round a single-pass 6-tap sum:   (b + 16)  >> 5
 *   op2_put / op2_avg round the double-pass (hv) sum:  (b + 512) >> 10
 * The *_avg forms additionally average with the value already in dst. */
#define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = CLIP(((b) + 16)>>5)
#define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
#define op2_put(a, b) a = CLIP(((b) + 512)>>10)
/* Instantiate the low-pass filter set and the 16 quarter-pel MC functions
 * for both "put" (store) and "avg" (average with dst) flavours.
 * NOTE(review): H264_MC(avg_, 2) is deliberately absent — presumably the
 * 2x2 avg variant has no consumer; confirm before adding it. */
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 2)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)
#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
/* The *_mc00 (integer-position) cases are plain block copies, so alias
 * them to the shared ff_{put,avg}_pixels helpers for the current depth. */
#if BIT_DEPTH == 8
# define put_h264_qpel8_mc00_8_c ff_put_pixels8x8_8_c
# define avg_h264_qpel8_mc00_8_c ff_avg_pixels8x8_8_c
# define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
# define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
#elif BIT_DEPTH == 9
# define put_h264_qpel8_mc00_9_c ff_put_pixels8x8_9_c
# define avg_h264_qpel8_mc00_9_c ff_avg_pixels8x8_9_c
# define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
# define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
#elif BIT_DEPTH == 10
# define put_h264_qpel8_mc00_10_c ff_put_pixels8x8_10_c
# define avg_h264_qpel8_mc00_10_c ff_avg_pixels8x8_10_c
# define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
# define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
#elif BIT_DEPTH == 12
# define put_h264_qpel8_mc00_12_c ff_put_pixels8x8_12_c
# define avg_h264_qpel8_mc00_12_c ff_avg_pixels8x8_12_c
# define put_h264_qpel16_mc00_12_c ff_put_pixels16x16_12_c
# define avg_h264_qpel16_mc00_12_c ff_avg_pixels16x16_12_c
#elif BIT_DEPTH == 14
# define put_h264_qpel8_mc00_14_c ff_put_pixels8x8_14_c
# define avg_h264_qpel8_mc00_14_c ff_avg_pixels8x8_14_c
# define put_h264_qpel16_mc00_14_c ff_put_pixels16x16_14_c
# define avg_h264_qpel16_mc00_14_c ff_avg_pixels16x16_14_c
#endif
  1246. void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
  1247. FUNCC(put_pixels8)(dst, src, stride, 8);
  1248. }
  1249. void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
  1250. FUNCC(avg_pixels8)(dst, src, stride, 8);
  1251. }
  1252. void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
  1253. FUNCC(put_pixels16)(dst, src, stride, 16);
  1254. }
  1255. void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
  1256. FUNCC(avg_pixels16)(dst, src, stride, 16);
  1257. }