You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1392 lines
56KB

  1. /*
  2. * DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of Libav.
  9. *
  10. * Libav is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * Libav is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with Libav; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * DSP utils
  27. */
  28. #include "high_bit_depth.h"
  29. static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  30. {
  31. int i;
  32. for(i=0; i<h; i++)
  33. {
  34. AV_WN2P(dst , AV_RN2P(src ));
  35. dst+=dstStride;
  36. src+=srcStride;
  37. }
  38. }
  39. static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  40. {
  41. int i;
  42. for(i=0; i<h; i++)
  43. {
  44. AV_WN4P(dst , AV_RN4P(src ));
  45. dst+=dstStride;
  46. src+=srcStride;
  47. }
  48. }
  49. static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  50. {
  51. int i;
  52. for(i=0; i<h; i++)
  53. {
  54. AV_WN4P(dst , AV_RN4P(src ));
  55. AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
  56. dst+=dstStride;
  57. src+=srcStride;
  58. }
  59. }
  60. static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
  61. {
  62. int i;
  63. for(i=0; i<h; i++)
  64. {
  65. AV_WN4P(dst , AV_RN4P(src ));
  66. AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
  67. AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
  68. AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
  69. dst+=dstStride;
  70. src+=srcStride;
  71. }
  72. }
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
/*
 * Replicate the border pixels of a width x height image outward:
 * 'w' columns on the left/right of every row, and (per 'sides' flags)
 * 'h' rows above and/or below, corners included.  '_wrap' is the row
 * stride in bytes; all other sizes are in pixels.
 */
static void FUNCC(draw_edges)(uint8_t *_buf, int _wrap, int width, int height, int w, int h, int sides)
{
    pixel *buf = (pixel*)_buf;
    int wrap = _wrap / sizeof(pixel); /* byte stride -> pixel stride */
    pixel *ptr, *last_line;
    int i;

    /* left and right: replicate first/last pixel of each row */
    ptr = buf;
    for(i=0;i<height;i++) {
#if BIT_DEPTH > 8
        /* pixels are wider than one byte, so memset cannot be used */
        int j;
        for (j = 0; j < w; j++) {
            ptr[j-w] = ptr[0];
            ptr[j+width] = ptr[width-1];
        }
#else
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
#endif
        ptr += wrap;
    }

    /* top and bottom + corners: duplicate the (already widened) first
     * and last rows outward */
    buf -= w; /* include the left border in the row copies */
    last_line = buf + (height - 1) * wrap;
    if (sides & EDGE_TOP)
        for(i = 0; i < h; i++)
            memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
    if (sides & EDGE_BOTTOM)
        for (i = 0; i < h; i++)
            memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
}
/**
 * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
 * Used for motion compensation when the reference block extends beyond the
 * picture edges: the out-of-picture part is filled by edge replication.
 * @param buf destination buffer
 * @param src source buffer
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block (in pixels)
 * @param block_h height of block (in pixels)
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
                               int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    /* Clamp src_y/src_x so the block overlaps the picture by at least one
     * row/column; src is moved along with the clamped coordinate. */
    if(src_y>= h){
        src+= (h-1-src_y)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x)*sizeof(pixel);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x)*sizeof(pixel);
        src_x=1-block_w;
    }

    /* Portion of the block that actually lies inside the picture. */
    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);
    assert(start_y < end_y && block_h);
    assert(start_x < end_x && block_w);

    w = end_x - start_x; /* width (in pixels) of the in-picture span */
    src += start_y*linesize + start_x*sizeof(pixel);
    buf += start_x*sizeof(pixel);

    //top: repeat the first available source row
    for(y=0; y<start_y; y++){
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }

    // copy existing part
    for(; y<end_y; y++){
        memcpy(buf, src, w*sizeof(pixel));
        src += linesize;
        buf += linesize;
    }

    //bottom: repeat the last available source row
    src -= linesize;
    for(; y<block_h; y++){
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }

    /* Second pass: replicate left/right columns in the destination. */
    buf -= block_h * linesize + start_x*sizeof(pixel);
    while (block_h--){
        pixel *bufp = (pixel*)buf;
        //left
        for(x=0; x<start_x; x++){
            bufp[x] = bufp[start_x];
        }
        //right
        for(x=end_x; x<block_w; x++){
            bufp[x] = bufp[end_x - 1];
        }
        buf += linesize;
    }
}
  176. static void FUNCC(add_pixels8)(uint8_t *restrict _pixels, DCTELEM *_block, int line_size)
  177. {
  178. int i;
  179. pixel *restrict pixels = (pixel *restrict)_pixels;
  180. dctcoef *block = (dctcoef*)_block;
  181. line_size /= sizeof(pixel);
  182. for(i=0;i<8;i++) {
  183. pixels[0] += block[0];
  184. pixels[1] += block[1];
  185. pixels[2] += block[2];
  186. pixels[3] += block[3];
  187. pixels[4] += block[4];
  188. pixels[5] += block[5];
  189. pixels[6] += block[6];
  190. pixels[7] += block[7];
  191. pixels += line_size;
  192. block += 8;
  193. }
  194. }
  195. static void FUNCC(add_pixels4)(uint8_t *restrict _pixels, DCTELEM *_block, int line_size)
  196. {
  197. int i;
  198. pixel *restrict pixels = (pixel *restrict)_pixels;
  199. dctcoef *block = (dctcoef*)_block;
  200. line_size /= sizeof(pixel);
  201. for(i=0;i<4;i++) {
  202. pixels[0] += block[0];
  203. pixels[1] += block[1];
  204. pixels[2] += block[2];
  205. pixels[3] += block[3];
  206. pixels += line_size;
  207. block += 4;
  208. }
  209. }
/*
 * PIXOP2(OPNAME, OP) instantiates the whole family of pixel copy / averaging
 * primitives (plain, x2, y2, xy2 half-pel variants, rounding and no-rounding,
 * and the _l2/_l4 multi-source averagers) for block widths 2/4/8/16.
 * OP is either op_put (plain store) or op_avg (store the rounded average
 * with the previous destination contents).
 * Two implementations exist: a 64-bit-at-a-time variant (disabled below,
 * 8-bit only) and the active pixel4-based variant that also supports
 * high bit depths.  Several xy2/_l4 bodies still operate on packed 8-bit
 * lanes — marked "FIXME HIGH BIT DEPTH" below.
 */
#if 0
/* Disabled reference implementation: processes 8 bytes per iteration with
 * uint64_t arithmetic; only valid for 8-bit pixels. */
#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint64_t*)block), AV_RN64(pixels));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    const uint64_t a= AV_RN64(pixels  );\
    const uint64_t b= AV_RN64(pixels+1);\
    uint64_t l0= (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL)\
               + 0x0202020202020202ULL;\
    uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
               + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
    uint64_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint64_t a= AV_RN64(pixels  );\
        uint64_t b= AV_RN64(pixels+1);\
        l1= (a&0x0303030303030303ULL)\
          + (b&0x0303030303030303ULL);\
        h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN64(pixels  );\
        b= AV_RN64(pixels+1);\
        l0= (a&0x0303030303030303ULL)\
          + (b&0x0303030303030303ULL)\
          + 0x0202020202020202ULL;\
        h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    const uint64_t a= AV_RN64(pixels  );\
    const uint64_t b= AV_RN64(pixels+1);\
    uint64_t l0= (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL)\
               + 0x0101010101010101ULL;\
    uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
               + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
    uint64_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint64_t a= AV_RN64(pixels  );\
        uint64_t b= AV_RN64(pixels+1);\
        l1= (a&0x0303030303030303ULL)\
          + (b&0x0303030303030303ULL);\
        h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN64(pixels  );\
        b= AV_RN64(pixels+1);\
        l0= (a&0x0303030303030303ULL)\
          + (b&0x0303030303030303ULL)\
          + 0x0101010101010101ULL;\
        h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
CALL_2X_PIXELS(OPNAME ## _pixels16_c  , OPNAME ## _pixels_c     , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8*sizeof(pixel))
#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
#else // 64 bit variant
/* Active implementation: operates on pixel2/pixel4 packed groups so it
 * works for both 8-bit and high-bit-depth pixel types. */
#define PIXOP2(OPNAME, OP) \
static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel2*)(block)), AV_RN2P(pixels));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block)), AV_RN4P(pixels));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block)), AV_RN4P(pixels));\
        OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1]);\
        b= AV_RN4P(&src2[i*src_stride2]);\
        OP(*((pixel4*)&dst[i*dst_stride]), no_rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1]);\
        b= AV_RN4P(&src2[i*src_stride2]);\
        OP(*((pixel4*)&dst[i*dst_stride]), rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1]);\
        b= AV_RN4P(&src2[i*src_stride2]);\
        OP(*((pixel4*)&dst[i*dst_stride]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN2P(&src1[i*src_stride1]);\
        b= AV_RN2P(&src2[i*src_stride2]);\
        OP(*((pixel2*)&dst[i*dst_stride]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _pixels8_l2)(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH */\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH*/\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _pixels8_l4)(dst, src1, src2, src3, src4, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst, src1, src2, src3, src4, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *_block, const uint8_t *_pixels, int line_size, int h)\
{\
    int i, a0, b0, a1, b1;\
    pixel *block = (pixel*)_block;\
    const pixel *pixels = (const pixel*)_pixels;\
    line_size /= sizeof(pixel);\
    a0= pixels[0];\
    b0= pixels[1] + 2;\
    a0 += b0;\
    b0 += pixels[2];\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        a1= pixels[0];\
        b1= pixels[1];\
        a1 += b1;\
        b1 += pixels[2];\
\
        block[0]= (a1+a0)>>2; /* FIXME non put */\
        block[1]= (b1+b0)>>2;\
\
        pixels+=line_size;\
        block +=line_size;\
\
        a0= pixels[0];\
        b0= pixels[1] + 2;\
        a0 += b0;\
        b0 += pixels[2];\
\
        block[0]= (a1+a0)>>2;\
        block[1]= (b1+b0)>>2;\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int i;\
    const uint32_t a= AV_RN32(pixels  );\
    const uint32_t b= AV_RN32(pixels+1);\
    uint32_t l0= (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x02020202UL;\
    uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
               + ((b&0xFCFCFCFCUL)>>2);\
    uint32_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint32_t a= AV_RN32(pixels  );\
        uint32_t b= AV_RN32(pixels+1);\
        l1= (a&0x03030303UL)\
          + (b&0x03030303UL);\
        h1= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN32(pixels  );\
        b= AV_RN32(pixels+1);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x02020202UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x02020202UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x01010101UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x01010101UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16)    , FUNCC(OPNAME ## _pixels8)    , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16)    , FUNCC(OPNAME ## _pixels8)    , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\
/* NOTE(review): the trailing backslash on the line above continues the
 * PIXOP2 macro into this comment-only line, which ends it; the original
 * file's terminating blank line was lost during extraction. */
#define op_avg(a, b) a = rnd_avg_pixel4(a, b)
#endif
#define op_put(a, b) a = b
/* Instantiate the averaging and plain-store variants of the whole family. */
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
/* For plain "put" there is nothing to round, so the no-rounding variants
 * are simple aliases of the regular put functions. */
#define put_no_rnd_pixels8_c put_pixels8_c
#define put_no_rnd_pixels16_c put_pixels16_c
  733. static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
  734. FUNC(put_no_rnd_pixels16_l2)(dst, a, b, stride, stride, stride, h);
  735. }
  736. static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
  737. FUNC(put_no_rnd_pixels8_l2)(dst, a, b, stride, stride, stride, h);
  738. }
  739. #define H264_CHROMA_MC(OPNAME, OP)\
  740. static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
  741. pixel *dst = (pixel*)_dst;\
  742. pixel *src = (pixel*)_src;\
  743. const int A=(8-x)*(8-y);\
  744. const int B=( x)*(8-y);\
  745. const int C=(8-x)*( y);\
  746. const int D=( x)*( y);\
  747. int i;\
  748. stride /= sizeof(pixel);\
  749. \
  750. assert(x<8 && y<8 && x>=0 && y>=0);\
  751. \
  752. if(D){\
  753. for(i=0; i<h; i++){\
  754. OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
  755. OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
  756. dst+= stride;\
  757. src+= stride;\
  758. }\
  759. }else{\
  760. const int E= B+C;\
  761. const int step= C ? stride : 1;\
  762. for(i=0; i<h; i++){\
  763. OP(dst[0], (A*src[0] + E*src[step+0]));\
  764. OP(dst[1], (A*src[1] + E*src[step+1]));\
  765. dst+= stride;\
  766. src+= stride;\
  767. }\
  768. }\
  769. }\
  770. \
  771. static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
  772. pixel *dst = (pixel*)_dst;\
  773. pixel *src = (pixel*)_src;\
  774. const int A=(8-x)*(8-y);\
  775. const int B=( x)*(8-y);\
  776. const int C=(8-x)*( y);\
  777. const int D=( x)*( y);\
  778. int i;\
  779. stride /= sizeof(pixel);\
  780. \
  781. assert(x<8 && y<8 && x>=0 && y>=0);\
  782. \
  783. if(D){\
  784. for(i=0; i<h; i++){\
  785. OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
  786. OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
  787. OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
  788. OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
  789. dst+= stride;\
  790. src+= stride;\
  791. }\
  792. }else{\
  793. const int E= B+C;\
  794. const int step= C ? stride : 1;\
  795. for(i=0; i<h; i++){\
  796. OP(dst[0], (A*src[0] + E*src[step+0]));\
  797. OP(dst[1], (A*src[1] + E*src[step+1]));\
  798. OP(dst[2], (A*src[2] + E*src[step+2]));\
  799. OP(dst[3], (A*src[3] + E*src[step+3]));\
  800. dst+= stride;\
  801. src+= stride;\
  802. }\
  803. }\
  804. }\
  805. \
/* 8-wide chroma MC: same bilinear scheme as the mc4 variant above,\
 * processing eight output pixels per row. */\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel); /* caller passes the stride in bytes */\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){ /* true 2-D case: both x and y are non-zero */\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
dst+= stride;\
src+= stride;\
}\
}else{ /* degenerate 1-D case (x==0 or y==0) */\
const int E= B+C;\
const int step= C ? stride : 1; /* vertical neighbour if y!=0, else horizontal */\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
OP(dst[2], (A*src[2] + E*src[step+2]));\
OP(dst[3], (A*src[3] + E*src[step+3]));\
OP(dst[4], (A*src[4] + E*src[step+4]));\
OP(dst[5], (A*src[5] + E*src[step+5]));\
OP(dst[6], (A*src[6] + E*src[step+6]));\
OP(dst[7], (A*src[7] + E*src[step+7]));\
dst+= stride;\
src+= stride;\
}\
}\
}
/* Write-back ops for the chroma MC template: the bilinear weights sum to
 * 64, so the result is rounded with +32 and shifted down by 6.  op_put
 * stores the value; op_avg additionally rounds-and-averages it with the
 * pixel already in the destination. */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
/* Instantiate put_/avg_ variants of h264_chroma_mc2/4/8. */
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
/*
 * H264_LOWPASS: template generating the H.264 luma half-pel 6-tap filters
 * (coefficients 1, -5, 20, 20, -5, 1) for block widths 2/4/8/16 in
 * horizontal (_h_), vertical (_v_) and two-dimensional (_hv_) forms.
 * OP performs the single-pass scale-down and store, OP2 the two-pass one;
 * all strides are passed in bytes and converted to pixel units inside.
 */
#define H264_LOWPASS(OPNAME, OP, OP2) \
/* 2-wide horizontal 6-tap filter; reads src[-2]..src[4] on each of h rows. */\
static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int h=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
  871. \
/* 2-wide vertical 6-tap filter; per column it reads rows -2..4 around dst. */\
static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int w=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
dst++;\
src++;\
}\
}\
  895. \
/* 2-wide 2-D filter: horizontal pass into the int16_t tmp buffer (h+5 rows\
 * to cover the vertical taps), then a vertical pass from tmp via OP2.\
 * pad biases the intermediates for BIT_DEPTH > 9 and is subtracted again\
 * below — presumably to keep them within int16_t range; TODO confirm. */\
static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
const int h=2;\
const int w=2;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
src -= 2*srcStride; /* start two rows above: vertical taps need rows -2..h+2 */\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2); /* rewind to row 2, the first row with full tap support */\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
dst++;\
tmp++;\
}\
}\
/* 4-wide horizontal 6-tap filter; reads src[-2]..src[6] on each of h rows. */\
static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int h=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
  948. \
/* 4-wide vertical 6-tap filter; per column it reads rows -2..6. */\
static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int w=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
const int src5= src[5 *srcStride];\
const int src6= src[6 *srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
dst++;\
src++;\
}\
}\
  976. \
/* 4-wide 2-D filter: horizontal pass into int16_t tmp (h+5 rows), then a\
 * vertical pass via OP2; pad bias as in the 2-wide variant above. */\
static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
const int h=4;\
const int w=4;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
src -= 2*srcStride; /* start two rows above for the vertical taps */\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2); /* rewind to row 2, the first fully-supported row */\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
const int tmp5= tmp[5 *tmpStride] - pad;\
const int tmp6= tmp[6 *tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
dst++;\
tmp++;\
}\
}\
  1017. \
/* 8-wide horizontal 6-tap filter; reads src[-2]..src[10] on each of h rows. */\
static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int h=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
  1040. \
/* 8-wide vertical 6-tap filter; per column it reads rows -2..10. */\
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
const int w=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
const int src5= src[5 *srcStride];\
const int src6= src[6 *srcStride];\
const int src7= src[7 *srcStride];\
const int src8= src[8 *srcStride];\
const int src9= src[9 *srcStride];\
const int src10=src[10*srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
dst++;\
src++;\
}\
}\
  1076. \
/* 8-wide 2-D filter: horizontal pass into int16_t tmp (h+5 rows), then a\
 * vertical pass via OP2; pad bias as in the 2-wide variant above. */\
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
const int h=8;\
const int w=8;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
src -= 2*srcStride; /* start two rows above for the vertical taps */\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2); /* rewind to row 2, the first fully-supported row */\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
const int tmp5= tmp[5 *tmpStride] - pad;\
const int tmp6= tmp[6 *tmpStride] - pad;\
const int tmp7= tmp[7 *tmpStride] - pad;\
const int tmp8= tmp[8 *tmpStride] - pad;\
const int tmp9= tmp[9 *tmpStride] - pad;\
const int tmp10=tmp[10*tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
dst++;\
tmp++;\
}\
}\
  1129. \
/* 16x16 vertical filter assembled from four 8x8 quadrant calls; pointers\
 * and strides stay in bytes here (the 8x8 workers convert), hence the\
 * sizeof(pixel) scaling of the column offset. */\
static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
  1138. \
/* 16x16 horizontal filter assembled from four 8x8 quadrant calls. */\
static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
  1147. \
/* 16x16 2-D filter assembled from four 8x8 quadrant calls; tmp is shared\
 * across quadrants, offset by 8 int16_t elements for the right half. */\
static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
}\
/*
 * H264_MC: template generating the 16 quarter-pel motion-compensation
 * entry points OPNAME##h264_qpel##SIZE##_mcXY.  X/Y encode the quarter-pel
 * offset; half-pel planes come from the 6-tap lowpass filters above, and
 * quarter-pel positions average two planes via the pixels##SIZE##_l2
 * helpers.  Scratch buffers are sized in bytes (SIZE*sizeof(pixel) wide).
 */
#define H264_MC(OPNAME, SIZE) \
/* mc00: integer-pel position — plain block copy/average. */\
static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
}\
  1160. \
/* mc10: average of full-pel src and the horizontal half-pel plane. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
  1166. \
/* mc20: pure horizontal half-pel, filtered straight into dst. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
}\
  1170. \
/* mc30: average of the horizontal half-pel plane and src shifted one pixel right. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
  1176. \
/* mc01: average of full-pel src and the vertical half-pel plane.  SIZE+5\
 * rows are copied into `full`, starting two rows above, so the vertical\
 * filter has its taps; full_mid points at the block origin (row 2). */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1185. \
/* mc02: pure vertical half-pel, filtered from the padded copy into dst. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
}\
  1192. \
/* mc03: average of the vertical half-pel plane and src shifted one row down. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1201. \
/* mc11: average of the horizontal and vertical half-pel planes. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1212. \
/* mc31: like mc11 but the vertical half-pel plane is taken one pixel right. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1223. \
/* mc13: like mc11 but the horizontal half-pel plane is taken one row down. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1234. \
/* mc33: H plane one row down averaged with V plane one pixel right. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1245. \
/* mc22: centre 2-D half-pel via the hv lowpass.  NOTE(review): tmp is\
 * declared with a sizeof(pixel) factor although its elements are int16_t —\
 * looks intentionally over-sized rather than wrong; verify before changing. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
}\
  1250. \
/* mc21: average of the horizontal half-pel plane and the 2-D (hv) plane. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1259. \
/* mc23: like mc21 but the horizontal half-pel plane is taken one row down. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1268. \
/* mc12: average of the vertical half-pel plane and the 2-D (hv) plane. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
  1280. \
/* mc32: like mc12 but the vertical half-pel plane is taken one pixel right. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
/* Write-back ops for the lowpass/MC templates.  One 6-tap pass scales the
 * sum by 32 (+16, >>5); the two-pass hv path scales by 32*32 = 1024
 * (+512, >>10), hence the separate op2_* pair.  CLIP saturates to the
 * current bit depth.  op_avg2 is dead, commented-out code kept as-is. */
#define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = CLIP(((b) + 16)>>5)
#define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
#define op2_put(a, b) a = CLIP(((b) + 512)>>10)
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
/* Note: only the put_ variant is instantiated at SIZE 2. */
H264_MC(put_, 2)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)
#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
/* The full-pel (mc00) 8x8 and 16x16 cases are plain copies/averages, so
 * alias them to the fixed-size pixel helpers defined below, selected per
 * compiled bit depth. */
#if BIT_DEPTH == 8
# define put_h264_qpel8_mc00_8_c ff_put_pixels8x8_8_c
# define avg_h264_qpel8_mc00_8_c ff_avg_pixels8x8_8_c
# define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
# define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
#elif BIT_DEPTH == 9
# define put_h264_qpel8_mc00_9_c ff_put_pixels8x8_9_c
# define avg_h264_qpel8_mc00_9_c ff_avg_pixels8x8_9_c
# define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
# define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
#elif BIT_DEPTH == 10
# define put_h264_qpel8_mc00_10_c ff_put_pixels8x8_10_c
# define avg_h264_qpel8_mc00_10_c ff_avg_pixels8x8_10_c
# define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
# define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
#endif
/* Copy an 8x8 pixel block: fixed-height wrapper around put_pixels8. */
void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(put_pixels8)(dst, src, stride, 8);
}
/* Average an 8x8 pixel block into dst: fixed-height wrapper around avg_pixels8. */
void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(avg_pixels8)(dst, src, stride, 8);
}
/* Copy a 16x16 pixel block: fixed-height wrapper around put_pixels16. */
void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(put_pixels16)(dst, src, stride, 16);
}
/* Average a 16x16 pixel block into dst: fixed-height wrapper around avg_pixels16. */
void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(avg_pixels16)(dst, src, stride, 16);
}
  1338. static void FUNCC(clear_block)(DCTELEM *block)
  1339. {
  1340. memset(block, 0, sizeof(dctcoef)*64);
  1341. }
  1342. /**
  1343. * memset(blocks, 0, sizeof(DCTELEM)*6*64)
  1344. */
  1345. static void FUNCC(clear_blocks)(DCTELEM *blocks)
  1346. {
  1347. memset(blocks, 0, sizeof(dctcoef)*6*64);
  1348. }