/*
 * DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DSP utils
 */

#include "dsputil.h"

/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    /* left and right */
    ptr = buf;
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }

    /* top and bottom + corners */
    buf -= w;
    last_line = buf + (height - 1) * wrap;
    if (sides & EDGE_TOP)
        for(i = 0; i < w; i++)
            memcpy(buf - (i + 1) * wrap, buf, width + w + w); // top
    if (sides & EDGE_BOTTOM)
        for (i = 0; i < w; i++)
            memcpy(last_line + (i + 1) * wrap, last_line, width + w + w); // bottom
}
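
/*
 * Worked example (editorial note, not in the original source): with w == 2,
 * a row "a b c d" becomes "a a | a b c d | d d" after the left/right pass.
 * The EDGE_TOP/EDGE_BOTTOM passes then copy whole padded rows outwards, which
 * also fills the four corners because buf was rewound by w beforehand.
 */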

/**
 * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
 * @param buf destination buffer
 * @param src source buffer
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void ff_emulated_edge_mc(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
                         int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    if(src_y>= h){
        src+= (h-1-src_y)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x);
        src_x=1-block_w;
    }

    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);
    assert(start_y < end_y && block_h);
    assert(start_x < end_x && block_w);

    w    = end_x - start_x;
    src += start_y*linesize + start_x;
    buf += start_x;

    //top
    for(y=0; y<start_y; y++){
        memcpy(buf, src, w);
        buf += linesize;
    }

    // copy existing part
    for(; y<end_y; y++){
        memcpy(buf, src, w);
        src += linesize;
        buf += linesize;
    }

    //bottom
    src -= linesize;
    for(; y<block_h; y++){
        memcpy(buf, src, w);
        buf += linesize;
    }

    buf -= block_h * linesize + start_x;
    while (block_h--){
        //left
        for(x=0; x<start_x; x++){
            buf[x] = buf[start_x];
        }

        //right
        for(x=end_x; x<block_w; x++){
            buf[x] = buf[end_x - 1];
        }
        buf += linesize;
    }
}
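
/*
 * A minimal usage sketch (editorial, not part of the original file): pad an
 * out-of-picture 16x16 motion-compensation read through ff_emulated_edge_mc()
 * before copying it. The function name, the caller-provided scratch buffer
 * edge_emu_buffer (which must use the same linesize as the reference plane)
 * and the fixed 16x16 block size are assumptions for illustration only.
 */
static av_unused void example_mc_with_edge_emulation(uint8_t *dst, const uint8_t *ref,
                                                     uint8_t *edge_emu_buffer, int linesize,
                                                     int src_x, int src_y,
                                                     int pic_width, int pic_height)
{
    const uint8_t *src = ref + src_y * linesize + src_x;
    int i;

    if (src_x < 0 || src_y < 0 ||
        src_x + 16 > pic_width || src_y + 16 > pic_height) {
        /* replicate the border samples into the scratch buffer ... */
        ff_emulated_edge_mc(edge_emu_buffer, src, linesize, 16, 16,
                            src_x, src_y, pic_width, pic_height);
        src = edge_emu_buffer; /* ... and predict from the padded copy */
    }
    for (i = 0; i < 16; i++)
        memcpy(dst + i * linesize, src + i * linesize, 16);
}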

static void add_pixels8_c(uint8_t *restrict pixels, DCTELEM *block, int line_size)
{
    int i;
    for(i=0;i<8;i++) {
        pixels[0] += block[0];
        pixels[1] += block[1];
        pixels[2] += block[2];
        pixels[3] += block[3];
        pixels[4] += block[4];
        pixels[5] += block[5];
        pixels[6] += block[6];
        pixels[7] += block[7];
        pixels += line_size;
        block += 8;
    }
}

static void add_pixels4_c(uint8_t *restrict pixels, DCTELEM *block, int line_size)
{
    int i;
    for(i=0;i<4;i++) {
        pixels[0] += block[0];
        pixels[1] += block[1];
        pixels[2] += block[2];
        pixels[3] += block[3];
        pixels += line_size;
        block += 4;
    }
}
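
/*
 * Editorial note: unlike the put/avg pixel ops defined below, these residual
 * adders do not clamp through ff_cropTbl; they appear intended for callers
 * that can guarantee prediction + residual stays within 0..255.
 */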

#if 0
#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint64_t*)block), AV_RN64(pixels));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    const uint64_t a= AV_RN64(pixels  );\
    const uint64_t b= AV_RN64(pixels+1);\
    uint64_t l0= (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL)\
               + 0x0202020202020202ULL;\
    uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
               + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
    uint64_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint64_t a= AV_RN64(pixels  );\
        uint64_t b= AV_RN64(pixels+1);\
        l1= (a&0x0303030303030303ULL)\
          + (b&0x0303030303030303ULL);\
        h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN64(pixels  );\
        b= AV_RN64(pixels+1);\
        l0= (a&0x0303030303030303ULL)\
          + (b&0x0303030303030303ULL)\
          + 0x0202020202020202ULL;\
        h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    const uint64_t a= AV_RN64(pixels  );\
    const uint64_t b= AV_RN64(pixels+1);\
    uint64_t l0= (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL)\
               + 0x0101010101010101ULL;\
    uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
               + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
    uint64_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint64_t a= AV_RN64(pixels  );\
        uint64_t b= AV_RN64(pixels+1);\
        l1= (a&0x0303030303030303ULL)\
          + (b&0x0303030303030303ULL);\
        h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN64(pixels  );\
        b= AV_RN64(pixels+1);\
        l0= (a&0x0303030303030303ULL)\
          + (b&0x0303030303030303ULL)\
          + 0x0101010101010101ULL;\
        h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
CALL_2X_PIXELS(OPNAME ## _pixels16_c    , OPNAME ## _pixels_c    , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8)

#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
#else // 64 bit variant

#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint16_t*)(block  )), AV_RN16(pixels  ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void OPNAME ## _pixels4_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint32_t*)(block  )), AV_RN32(pixels  ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void OPNAME ## _pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint32_t*)(block  )), AV_RN32(pixels  ));\
        OP(*((uint32_t*)(block+4)), AV_RN32(pixels+4));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static inline void OPNAME ## _no_rnd_pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_c(block, pixels, line_size, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a,b;\
        a= AV_RN32(&src1[i*src_stride1  ]);\
        b= AV_RN32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), no_rnd_avg32(a, b));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), no_rnd_avg32(a, b));\
    }\
}\
\
static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                         int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a,b;\
        a= AV_RN32(&src1[i*src_stride1  ]);\
        b= AV_RN32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), rnd_avg32(a, b));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), rnd_avg32(a, b));\
    }\
}\
\
static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                         int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a,b;\
        a= AV_RN32(&src1[i*src_stride1  ]);\
        b= AV_RN32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), rnd_avg32(a, b));\
    }\
}\
\
static inline void OPNAME ## _pixels2_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                         int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a,b;\
        a= AV_RN16(&src1[i*src_stride1  ]);\
        b= AV_RN16(&src2[i*src_stride2  ]);\
        OP(*((uint16_t*)&dst[i*dst_stride  ]), rnd_avg32(a, b));\
    }\
}\
\
static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                          int src_stride1, int src_stride2, int h){\
    OPNAME ## _pixels8_l2(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    OPNAME ## _pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                 int src_stride1, int src_stride2, int h){\
    OPNAME ## _no_rnd_pixels8_l2(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    OPNAME ## _no_rnd_pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels8_l4(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                                         int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
\
static inline void OPNAME ## _pixels4_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels4_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels4_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels4_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels2_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels2_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels2_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels2_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l4(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                                                int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
static inline void OPNAME ## _pixels16_l4(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                                          int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    OPNAME ## _pixels8_l4(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    OPNAME ## _pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
static inline void OPNAME ## _no_rnd_pixels16_l4(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                                                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    OPNAME ## _no_rnd_pixels8_l4(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    OPNAME ## _no_rnd_pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
\
static inline void OPNAME ## _pixels2_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i, a0, b0, a1, b1;\
    a0= pixels[0];\
    b0= pixels[1] + 2;\
    a0 += b0;\
    b0 += pixels[2];\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        a1= pixels[0];\
        b1= pixels[1];\
        a1 += b1;\
        b1 += pixels[2];\
\
        block[0]= (a1+a0)>>2; /* FIXME non put */\
        block[1]= (b1+b0)>>2;\
\
        pixels+=line_size;\
        block +=line_size;\
\
        a0= pixels[0];\
        b0= pixels[1] + 2;\
        a0 += b0;\
        b0 += pixels[2];\
\
        block[0]= (a1+a0)>>2;\
        block[1]= (b1+b0)>>2;\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void OPNAME ## _pixels4_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    const uint32_t a= AV_RN32(pixels  );\
    const uint32_t b= AV_RN32(pixels+1);\
    uint32_t l0= (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x02020202UL;\
    uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
               + ((b&0xFCFCFCFCUL)>>2);\
    uint32_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint32_t a= AV_RN32(pixels  );\
        uint32_t b= AV_RN32(pixels+1);\
        l1= (a&0x03030303UL)\
          + (b&0x03030303UL);\
        h1= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN32(pixels  );\
        b= AV_RN32(pixels+1);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void OPNAME ## _pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x02020202UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x02020202UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x01010101UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x01010101UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
CALL_2X_PIXELS(OPNAME ## _pixels16_c    , OPNAME ## _pixels8_c    , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels8_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels8_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels8_xy2_c, 8)\
av_unused CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_c, OPNAME ## _pixels8_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels8_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels8_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels8_xy2_c, 8)\

#define op_avg(a, b) a = rnd_avg32(a, b)
#endif
#define op_put(a, b) a = b

PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
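
/*
 * Background (editorial note): rnd_avg32()/no_rnd_avg32() from dsputil.h
 * average four bytes per operation without unpacking them. Per byte lane,
 *     (a | b) - (((a ^ b) & 0xFE) >> 1) == (a + b + 1) >> 1   (round up)
 *     (a & b) + (((a ^ b) & 0xFE) >> 1) == (a + b)     >> 1   (round down)
 * since a + b == 2*(a & b) + (a ^ b); the 0xFEFEFEFE mask clears each lane's
 * low bit before the shift so nothing leaks into the neighbouring byte.
 * A minimal self-check of the identity on one packed word:
 */
static av_unused int example_check_rnd_avg32(void)
{
    const uint32_t a = 0x10FF3370, b = 0x21003171;
    const uint32_t packed = rnd_avg32(a, b);
    int i, ok = 1;

    for (i = 0; i < 4; i++) {
        const unsigned la = (a      >> (8 * i)) & 0xFF;
        const unsigned lb = (b      >> (8 * i)) & 0xFF;
        const unsigned lp = (packed >> (8 * i)) & 0xFF;
        ok &= lp == ((la + lb + 1) >> 1); /* rounded-up byte average */
    }
    return ok;
}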

#define put_no_rnd_pixels8_c  put_pixels8_c
#define put_no_rnd_pixels16_c put_pixels16_c

static void put_no_rnd_pixels16_l2_c(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    put_no_rnd_pixels16_l2(dst, a, b, stride, stride, stride, h);
}
static void put_no_rnd_pixels8_l2_c(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    put_no_rnd_pixels8_l2(dst, a, b, stride, stride, stride, h);
}

#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
\
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
\
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
\
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            OP(dst[4], (A*src[4] + E*src[step+4]));\
            OP(dst[5], (A*src[5] + E*src[step+5]));\
            OP(dst[6], (A*src[6] + E*src[step+6]));\
            OP(dst[7], (A*src[7] + E*src[step+7]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}

#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)

H264_CHROMA_MC(put_, op_put)
H264_CHROMA_MC(avg_, op_avg)
#undef op_avg
#undef op_put
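
/*
 * Background (editorial note): the chroma MC above is H.264 bilinear
 * interpolation at eighth-sample offset (x/8, y/8). The four weights sum to
 *     A + B + C + D = (8-x)*(8-y) + x*(8-y) + (8-x)*y + x*y = 64,
 * which is why op_put rounds with (sum + 32) >> 6. When x or y is zero,
 * D == 0 and the code falls back to a 2-tap filter with E = B + C, stepping
 * either horizontally or vertically depending on which fraction is nonzero.
 */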

#define H264_LOWPASS(OPNAME, OP, OP2) \
static av_unused void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int h=2;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static av_unused void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int w=2;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        dst++;\
        src++;\
    }\
}\
\
static av_unused void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    const int h=2;\
    const int w=2;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]);\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]);\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride];\
        const int tmpA= tmp[-1*tmpStride];\
        const int tmp0= tmp[0 *tmpStride];\
        const int tmp1= tmp[1 *tmpStride];\
        const int tmp2= tmp[2 *tmpStride];\
        const int tmp3= tmp[3 *tmpStride];\
        const int tmp4= tmp[4 *tmpStride];\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        dst++;\
        tmp++;\
    }\
}\
static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int h=4;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int w=4;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    const int h=4;\
    const int w=4;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]);\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]);\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]);\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]);\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride];\
        const int tmpA= tmp[-1*tmpStride];\
        const int tmp0= tmp[0 *tmpStride];\
        const int tmp1= tmp[1 *tmpStride];\
        const int tmp2= tmp[2 *tmpStride];\
        const int tmp3= tmp[3 *tmpStride];\
        const int tmp4= tmp[4 *tmpStride];\
        const int tmp5= tmp[5 *tmpStride];\
        const int tmp6= tmp[6 *tmpStride];\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        dst++;\
        tmp++;\
    }\
}\
\
static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int h=8;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
        OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
        OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
        OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
        OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int w=8;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        const int src7= src[7 *srcStride];\
        const int src8= src[8 *srcStride];\
        const int src9= src[9 *srcStride];\
        const int src10=src[10*srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
        OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
        OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
        OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    const int h=8;\
    const int w=8;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]);\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]);\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]);\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]);\
        tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]);\
        tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]);\
        tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]);\
        tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]);\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride];\
        const int tmpA= tmp[-1*tmpStride];\
        const int tmp0= tmp[0 *tmpStride];\
        const int tmp1= tmp[1 *tmpStride];\
        const int tmp2= tmp[2 *tmpStride];\
        const int tmp3= tmp[3 *tmpStride];\
        const int tmp4= tmp[4 *tmpStride];\
        const int tmp5= tmp[5 *tmpStride];\
        const int tmp6= tmp[6 *tmpStride];\
        const int tmp7= tmp[7 *tmpStride];\
        const int tmp8= tmp[8 *tmpStride];\
        const int tmp9= tmp[9 *tmpStride];\
        const int tmp10=tmp[10*tmpStride];\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
        OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
        OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
        OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
        dst++;\
        tmp++;\
    }\
}\
\
static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_v_lowpass(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_v_lowpass(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8_hv_lowpass(dst  , tmp  , src  , dstStride, tmpStride, srcStride);\
    OPNAME ## h264_qpel8_hv_lowpass(dst+8, tmp+8, src+8, dstStride, tmpStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_hv_lowpass(dst  , tmp  , src  , dstStride, tmpStride, srcStride);\
    OPNAME ## h264_qpel8_hv_lowpass(dst+8, tmp+8, src+8, dstStride, tmpStride, srcStride);\
}\

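/*
 * Background (editorial note): the lowpass kernels above implement the H.264
 * 6-tap half-sample filter (1, -5, 20, 20, -5, 1), whose taps sum to 32;
 * op_put therefore rounds with (sum + 16) >> 5. The _hv_ variants keep the
 * unscaled 16-bit horizontal output in tmp[] and filter it vertically, for a
 * combined gain of 32*32 = 1024, which op2_put removes with (sum + 512) >> 10.
 */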

#define H264_MC(OPNAME, SIZE) \
static av_unused void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, src, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, src+1, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t half[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t half[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfV[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfV[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}\

#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
#define op2_put(a, b) a = cm[((b) + 512)>>10]

H264_LOWPASS(put_, op_put, op2_put)
H264_LOWPASS(avg_, op_avg, op2_avg)
H264_MC(put_, 2)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)

#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
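
/*
 * Editorial note on the naming: OPNAME ## h264_qpel ## SIZE ## _mcXY_c
 * computes the prediction at quarter-sample offset (X/4, Y/4). mc20/mc02 are
 * the pure half-sample positions, mc22 the centre, and the remaining
 * positions average two filtered (or one filtered and the integer) planes
 * via the _l2 helpers, following the H.264 quarter-pel interpolation rules.
 */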

#define put_h264_qpel8_mc00_c  ff_put_pixels8x8_c
#define avg_h264_qpel8_mc00_c  ff_avg_pixels8x8_c
#define put_h264_qpel16_mc00_c ff_put_pixels16x16_c
#define avg_h264_qpel16_mc00_c ff_avg_pixels16x16_c

void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_c(dst, src, stride, 8);
}
void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_c(dst, src, stride, 8);
}
void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_c(dst, src, stride, 16);
}
void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_c(dst, src, stride, 16);
}

static void clear_block_c(DCTELEM *block)
{
    memset(block, 0, sizeof(DCTELEM)*64);
}

/**
 * memset(blocks, 0, sizeof(DCTELEM)*6*64)
 */
static void clear_blocks_c(DCTELEM *blocks)
{
    memset(blocks, 0, sizeof(DCTELEM)*6*64);
}