/*
 * DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/dsputil.c
 * DSP utils
 */

#include "avcodec.h"
#include "dsputil.h"
#include "simple_idct.h"
#include "faandct.h"
#include "faanidct.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "config.h"
#include "lpc.h"
#include "ac3dec.h"
#include "vorbis.h"
#include "png.h"

uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
uint32_t ff_squareTbl[512] = {0, };

// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
#define pb_7f (~0UL/255 * 0x7f)
#define pb_80 (~0UL/255 * 0x80)
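
/*
 * How the replication works: ~0UL/255 is 0x01010101 on a 32-bit unsigned
 * long (0xFFFFFFFF/255) and 0x0101010101010101 on a 64-bit one, i.e. the
 * byte 0x01 broadcast across the word; multiplying by 0x7f or 0x80 then
 * broadcasts that byte instead:
 *
 *     pb_7f == 0x7f7f7f7f  (or 0x7f7f7f7f7f7f7f7f)
 *     pb_80 == 0x80808080  (or 0x8080808080808080)
 */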
const uint8_t ff_zigzag_direct[64] = {
     0,  1,  8, 16,  9,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

/* Specific zigzag scan for 248 idct. NOTE that unlike the
   specification, we interleave the fields */
const uint8_t ff_zigzag248_direct[64] = {
     0,  8,  1,  9, 16, 24,  2, 10,
    17, 25, 32, 40, 48, 56, 33, 41,
    18, 26,  3, 11,  4, 12, 19, 27,
    34, 42, 49, 57, 50, 58, 35, 43,
    20, 28,  5, 13,  6, 14, 21, 29,
    36, 44, 51, 59, 52, 60, 37, 45,
    22, 30,  7, 15, 23, 31, 38, 46,
    53, 61, 54, 62, 39, 47, 55, 63,
};

/* not permuted inverse zigzag_direct + 1 for MMX quantizer */
DECLARE_ALIGNED(16, uint16_t, inv_zigzag_direct16)[64];

const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};

const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};

/* a*inverse[b]>>32 == a/b for all 0<=a<=16909558 && 2<=b<=256
 * for a>16909558, is an overestimate by less than 1 part in 1<<24 */
const uint32_t ff_inverse[257]={
          0, 4294967295U,2147483648U,1431655766, 1073741824, 858993460, 715827883, 613566757,
  536870912, 477218589, 429496730, 390451573, 357913942, 330382100, 306783379, 286331154,
  268435456, 252645136, 238609295, 226050911, 214748365, 204522253, 195225787, 186737709,
  178956971, 171798692, 165191050, 159072863, 153391690, 148102321, 143165577, 138547333,
  134217728, 130150525, 126322568, 122713352, 119304648, 116080198, 113025456, 110127367,
  107374183, 104755300, 102261127,  99882961,  97612894,  95443718,  93368855,  91382283,
   89478486,  87652394,  85899346,  84215046,  82595525,  81037119,  79536432,  78090315,
   76695845,  75350304,  74051161,  72796056,  71582789,  70409300,  69273667,  68174085,
   67108864,  66076420,  65075263,  64103990,  63161284,  62245903,  61356676,  60492498,
   59652324,  58835169,  58040099,  57266231,  56512728,  55778797,  55063684,  54366675,
   53687092,  53024288,  52377650,  51746594,  51130564,  50529028,  49941481,  49367441,
   48806447,  48258060,  47721859,  47197443,  46684428,  46182445,  45691142,  45210183,
   44739243,  44278014,  43826197,  43383509,  42949673,  42524429,  42107523,  41698712,
   41297763,  40904451,  40518560,  40139882,  39768216,  39403370,  39045158,  38693400,
   38347923,  38008561,  37675152,  37347542,  37025581,  36709123,  36398028,  36092163,
   35791395,  35495598,  35204650,  34918434,  34636834,  34359739,  34087043,  33818641,
   33554432,  33294321,  33038210,  32786010,  32537632,  32292988,  32051995,  31814573,
   31580642,  31350127,  31122952,  30899046,  30678338,  30460761,  30246249,  30034737,
   29826162,  29620465,  29417585,  29217465,  29020050,  28825284,  28633116,  28443493,
   28256364,  28071682,  27889399,  27709467,  27531842,  27356480,  27183338,  27012373,
   26843546,  26676816,  26512144,  26349493,  26188825,  26030105,  25873297,  25718368,
   25565282,  25414008,  25264514,  25116768,  24970741,  24826401,  24683721,  24542671,
   24403224,  24265352,  24129030,  23994231,  23860930,  23729102,  23598722,  23469767,
   23342214,  23216040,  23091223,  22967740,  22845571,  22724695,  22605092,  22486740,
   22369622,  22253717,  22139007,  22025474,  21913099,  21801865,  21691755,  21582751,
   21474837,  21367997,  21262215,  21157475,  21053762,  20951060,  20849356,  20748635,
   20648882,  20550083,  20452226,  20355296,  20259280,  20164166,  20069941,  19976593,
   19884108,  19792477,  19701685,  19611723,  19522579,  19434242,  19346700,  19259944,
   19173962,  19088744,  19004281,  18920561,  18837576,  18755316,  18673771,  18592933,
   18512791,  18433337,  18354562,  18276457,  18199014,  18122225,  18046082,  17970575,
   17895698,  17821442,  17747799,  17674763,  17602325,  17530479,  17459217,  17388532,
   17318417,  17248865,  17179870,  17111424,  17043522,  16976156,  16909321,  16843010,
   16777216
};
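
/*
 * Illustrative sketch of the identity documented above (the helper name is
 * hypothetical, not part of this file): division by a small constant becomes
 * a multiply and a shift via the reciprocal table.
 *
 *     static inline unsigned fast_div(unsigned a, unsigned b)
 *     {
 *         // exact for 0 <= a <= 16909558 and 2 <= b <= 256
 *         return (unsigned)(((uint64_t)a * ff_inverse[b]) >> 32);
 *     }
 */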
/* Input permutation for the simple_idct_mmx */
static const uint8_t simple_mmx_permutation[64]={
    0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
    0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
    0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
    0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
    0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
    0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
    0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
    0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};

static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7};

void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
    int i;
    int end;

    st->scantable= src_scantable;

    for(i=0; i<64; i++){
        int j;
        j = src_scantable[i];
        st->permutated[i] = permutation[j];
#if ARCH_PPC
        st->inverse[j] = i;
#endif
    }

    end=-1;
    for(i=0; i<64; i++){
        int j;
        j = st->permutated[i];
        if(j>end) end=j;
        st->raster_end[i]= end;
    }
}
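
/*
 * Illustrative use (a sketch in the style of the mpegvideo code; the exact
 * context field names are assumptions): once the selected IDCT has filled in
 * its idct_permutation, each scan order is wrapped in a ScanTable so that
 * coefficients can be addressed in the order that IDCT expects:
 *
 *     ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable,
 *                       ff_zigzag_direct);
 *     // permutated[i] maps scan position i to the permuted coefficient
 *     // index; raster_end[i] is the largest permuted index seen up to
 *     // scan position i.
 */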
static int pix_sum_c(uint8_t * pix, int line_size)
{
    int s, i, j;

    s = 0;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
            s += pix[0];
            s += pix[1];
            s += pix[2];
            s += pix[3];
            s += pix[4];
            s += pix[5];
            s += pix[6];
            s += pix[7];
            pix += 8;
        }
        pix += line_size - 16;
    }
    return s;
}

static int pix_norm1_c(uint8_t * pix, int line_size)
{
    int s, i, j;
    uint32_t *sq = ff_squareTbl + 256;

    s = 0;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
#if 0
            s += sq[pix[0]];
            s += sq[pix[1]];
            s += sq[pix[2]];
            s += sq[pix[3]];
            s += sq[pix[4]];
            s += sq[pix[5]];
            s += sq[pix[6]];
            s += sq[pix[7]];
#else
#if LONG_MAX > 2147483647
            register uint64_t x=*(uint64_t*)pix;
            s += sq[x&0xff];
            s += sq[(x>>8)&0xff];
            s += sq[(x>>16)&0xff];
            s += sq[(x>>24)&0xff];
            s += sq[(x>>32)&0xff];
            s += sq[(x>>40)&0xff];
            s += sq[(x>>48)&0xff];
            s += sq[(x>>56)&0xff];
#else
            register uint32_t x=*(uint32_t*)pix;
            s += sq[x&0xff];
            s += sq[(x>>8)&0xff];
            s += sq[(x>>16)&0xff];
            s += sq[(x>>24)&0xff];
            x=*(uint32_t*)(pix+4);
            s += sq[x&0xff];
            s += sq[(x>>8)&0xff];
            s += sq[(x>>16)&0xff];
            s += sq[(x>>24)&0xff];
#endif
#endif
            pix += 8;
        }
        pix += line_size - 16;
    }
    return s;
}
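
/*
 * A note on the indexing above: ff_squareTbl is built (in the init code) so
 * that entry 256+d holds d*d for d in [-256, 255]. Centering the pointer,
 *
 *     uint32_t *sq = ff_squareTbl + 256;
 *
 * lets sq[] be indexed by a possibly negative byte difference, which is what
 * the sse*_c functions below rely on:
 *
 *     int d = pix1[0] - pix2[0];   // in [-255, 255]
 *     s += sq[d];                  // == d*d, no FFABS needed
 */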
static void bswap_buf(uint32_t *dst, const uint32_t *src, int w){
    int i;

    for(i=0; i+8<=w; i+=8){
        dst[i+0]= bswap_32(src[i+0]);
        dst[i+1]= bswap_32(src[i+1]);
        dst[i+2]= bswap_32(src[i+2]);
        dst[i+3]= bswap_32(src[i+3]);
        dst[i+4]= bswap_32(src[i+4]);
        dst[i+5]= bswap_32(src[i+5]);
        dst[i+6]= bswap_32(src[i+6]);
        dst[i+7]= bswap_32(src[i+7]);
    }
    for(;i<w; i++){
        dst[i+0]= bswap_32(src[i+0]);
    }
}

static int sse4_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
    int s, i;
    uint32_t *sq = ff_squareTbl + 256;

    s = 0;
    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
    int s, i;
    uint32_t *sq = ff_squareTbl + 256;

    s = 0;
    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int s, i;
    uint32_t *sq = ff_squareTbl + 256;

    s = 0;
    for (i = 0; i < h; i++) {
        s += sq[pix1[ 0] - pix2[ 0]];
        s += sq[pix1[ 1] - pix2[ 1]];
        s += sq[pix1[ 2] - pix2[ 2]];
        s += sq[pix1[ 3] - pix2[ 3]];
        s += sq[pix1[ 4] - pix2[ 4]];
        s += sq[pix1[ 5] - pix2[ 5]];
        s += sq[pix1[ 6] - pix2[ 6]];
        s += sq[pix1[ 7] - pix2[ 7]];
        s += sq[pix1[ 8] - pix2[ 8]];
        s += sq[pix1[ 9] - pix2[ 9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    for(i=0;i<w;i++) {
        /* top and bottom */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* left and right */
    ptr = buf;
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }
    /* corners */
    for(i=0;i<w;i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
    }
}
/**
 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
 * @param buf destination buffer
 * @param src source buffer
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                         int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    if(src_y>= h){
        src+= (h-1-src_y)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x);
        src_x=1-block_w;
    }

    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);

    // copy existing part
    for(y=start_y; y<end_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= src[x + y*linesize];
        }
    }

    //top
    for(y=0; y<start_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + start_y*linesize];
        }
    }

    //bottom
    for(y=end_y; y<block_h; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
        }
    }

    for(y=0; y<block_h; y++){
        //left
        for(x=0; x<start_x; x++){
            buf[x + y*linesize]= buf[start_x + y*linesize];
        }

        //right
        for(x=end_x; x<block_w; x++){
            buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
        }
    }
}
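
/*
 * Illustrative call (a sketch; buffer and variable names are assumptions):
 * when a motion vector points partly outside the picture, a decoder first
 * pads the needed block into a scratch buffer with this function and then
 * runs the ordinary MC code on that buffer instead of the frame:
 *
 *     if (src_x < 0 || src_y < 0 ||
 *         src_x + 17 > h_edge_pos || src_y + 17 > v_edge_pos) {
 *         ff_emulated_edge_mc(edge_emu_buffer, ptr, linesize, 17, 17,
 *                             src_x, src_y, h_edge_pos, v_edge_pos);
 *         ptr = edge_emu_buffer;
 *     }
 */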
static void get_pixels_c(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;

    /* read the pixels */
    for(i=0;i<8;i++) {
        block[0] = pixels[0];
        block[1] = pixels[1];
        block[2] = pixels[2];
        block[3] = pixels[3];
        block[4] = pixels[4];
        block[5] = pixels[5];
        block[6] = pixels[6];
        block[7] = pixels[7];
        pixels += line_size;
        block += 8;
    }
}

static void diff_pixels_c(DCTELEM *restrict block, const uint8_t *s1,
                          const uint8_t *s2, int stride){
    int i;

    /* read the pixels */
    for(i=0;i<8;i++) {
        block[0] = s1[0] - s2[0];
        block[1] = s1[1] - s2[1];
        block[2] = s1[2] - s2[2];
        block[3] = s1[3] - s2[3];
        block[4] = s1[4] - s2[4];
        block[5] = s1[5] - s2[5];
        block[6] = s1[6] - s2[6];
        block[7] = s1[7] - s2[7];
        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

static void put_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
                                 int line_size)
{
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
    for(i=0;i<8;i++) {
        pixels[0] = cm[block[0]];
        pixels[1] = cm[block[1]];
        pixels[2] = cm[block[2]];
        pixels[3] = cm[block[3]];
        pixels[4] = cm[block[4]];
        pixels[5] = cm[block[5]];
        pixels[6] = cm[block[6]];
        pixels[7] = cm[block[7]];
        pixels += line_size;
        block += 8;
    }
}

static void put_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels,
                                  int line_size)
{
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
    for(i=0;i<4;i++) {
        pixels[0] = cm[block[0]];
        pixels[1] = cm[block[1]];
        pixels[2] = cm[block[2]];
        pixels[3] = cm[block[3]];
        pixels += line_size;
        block += 8;
    }
}

static void put_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels,
                                  int line_size)
{
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
    for(i=0;i<2;i++) {
        pixels[0] = cm[block[0]];
        pixels[1] = cm[block[1]];
        pixels += line_size;
        block += 8;
    }
}

static void put_signed_pixels_clamped_c(const DCTELEM *block,
                                        uint8_t *restrict pixels,
                                        int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            if (*block < -128)
                *pixels = 0;
            else if (*block > 127)
                *pixels = 255;
            else
                *pixels = (uint8_t)(*block + 128);
            block++;
            pixels++;
        }
        pixels += (line_size - 8);
    }
}
static void put_pixels_nonclamped_c(const DCTELEM *block, uint8_t *restrict pixels,
                                    int line_size)
{
    int i;

    /* read the pixels */
    for(i=0;i<8;i++) {
        pixels[0] = block[0];
        pixels[1] = block[1];
        pixels[2] = block[2];
        pixels[3] = block[3];
        pixels[4] = block[4];
        pixels[5] = block[5];
        pixels[6] = block[6];
        pixels[7] = block[7];
        pixels += line_size;
        block += 8;
    }
}

static void add_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
                                 int line_size)
{
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
    for(i=0;i<8;i++) {
        pixels[0] = cm[pixels[0] + block[0]];
        pixels[1] = cm[pixels[1] + block[1]];
        pixels[2] = cm[pixels[2] + block[2]];
        pixels[3] = cm[pixels[3] + block[3]];
        pixels[4] = cm[pixels[4] + block[4]];
        pixels[5] = cm[pixels[5] + block[5]];
        pixels[6] = cm[pixels[6] + block[6]];
        pixels[7] = cm[pixels[7] + block[7]];
        pixels += line_size;
        block += 8;
    }
}

static void add_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels,
                                  int line_size)
{
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
    for(i=0;i<4;i++) {
        pixels[0] = cm[pixels[0] + block[0]];
        pixels[1] = cm[pixels[1] + block[1]];
        pixels[2] = cm[pixels[2] + block[2]];
        pixels[3] = cm[pixels[3] + block[3]];
        pixels += line_size;
        block += 8;
    }
}

static void add_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels,
                                  int line_size)
{
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    /* read the pixels */
    for(i=0;i<2;i++) {
        pixels[0] = cm[pixels[0] + block[0]];
        pixels[1] = cm[pixels[1] + block[1]];
        pixels += line_size;
        block += 8;
    }
}

static void add_pixels8_c(uint8_t *restrict pixels, DCTELEM *block, int line_size)
{
    int i;

    for(i=0;i<8;i++) {
        pixels[0] += block[0];
        pixels[1] += block[1];
        pixels[2] += block[2];
        pixels[3] += block[3];
        pixels[4] += block[4];
        pixels[5] += block[5];
        pixels[6] += block[6];
        pixels[7] += block[7];
        pixels += line_size;
        block += 8;
    }
}

static void add_pixels4_c(uint8_t *restrict pixels, DCTELEM *block, int line_size)
{
    int i;

    for(i=0;i<4;i++) {
        pixels[0] += block[0];
        pixels[1] += block[1];
        pixels[2] += block[2];
        pixels[3] += block[3];
        pixels += line_size;
        block += 4;
    }
}

static int sum_abs_dctelem_c(DCTELEM *block)
{
    int sum=0, i;

    for(i=0; i<64; i++)
        sum+= FFABS(block[i]);
    return sum;
}

static void fill_block16_c(uint8_t *block, uint8_t value, int line_size, int h)
{
    int i;

    for (i = 0; i < h; i++) {
        memset(block, value, 16);
        block += line_size;
    }
}

static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h)
{
    int i;

    for (i = 0; i < h; i++) {
        memset(block, value, 8);
        block += line_size;
    }
}

static void scale_block_c(const uint8_t src[64]/*align 8*/, uint8_t *dst/*align 8*/, int linesize)
{
    int i, j;
    uint16_t *dst1 = (uint16_t *) dst;
    uint16_t *dst2 = (uint16_t *)(dst + linesize);

    for (j = 0; j < 8; j++) {
        for (i = 0; i < 8; i++) {
            dst1[i] = dst2[i] = src[i] * 0x0101;
        }
        src  += 8;
        dst1 += linesize;
        dst2 += linesize;
    }
}
#if 0
#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint64_t*)block), AV_RN64(pixels));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    const uint64_t a= AV_RN64(pixels  );\
    const uint64_t b= AV_RN64(pixels+1);\
    uint64_t l0=  (a&0x0303030303030303ULL)\
                + (b&0x0303030303030303ULL)\
                + 0x0202020202020202ULL;\
    uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
               + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
    uint64_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint64_t a= AV_RN64(pixels  );\
        uint64_t b= AV_RN64(pixels+1);\
        l1=  (a&0x0303030303030303ULL)\
           + (b&0x0303030303030303ULL);\
        h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN64(pixels  );\
        b= AV_RN64(pixels+1);\
        l0=  (a&0x0303030303030303ULL)\
           + (b&0x0303030303030303ULL)\
           + 0x0202020202020202ULL;\
        h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    const uint64_t a= AV_RN64(pixels  );\
    const uint64_t b= AV_RN64(pixels+1);\
    uint64_t l0=  (a&0x0303030303030303ULL)\
                + (b&0x0303030303030303ULL)\
                + 0x0101010101010101ULL;\
    uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
               + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
    uint64_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint64_t a= AV_RN64(pixels  );\
        uint64_t b= AV_RN64(pixels+1);\
        l1=  (a&0x0303030303030303ULL)\
           + (b&0x0303030303030303ULL);\
        h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN64(pixels  );\
        b= AV_RN64(pixels+1);\
        l0=  (a&0x0303030303030303ULL)\
           + (b&0x0303030303030303ULL)\
           + 0x0101010101010101ULL;\
        h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
CALL_2X_PIXELS(OPNAME ## _pixels16_c    , OPNAME ## _pixels_c    , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8)

#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
#else // the disabled #if 0 branch above is the 64-bit variant; this is the 32-bit based one
#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint16_t*)(block  )), AV_RN16(pixels  ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void OPNAME ## _pixels4_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint32_t*)(block  )), AV_RN32(pixels  ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void OPNAME ## _pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint32_t*)(block  )), AV_RN32(pixels  ));\
        OP(*((uint32_t*)(block+4)), AV_RN32(pixels+4));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static inline void OPNAME ## _no_rnd_pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_c(block, pixels, line_size, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a,b;\
        a= AV_RN32(&src1[i*src_stride1  ]);\
        b= AV_RN32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), no_rnd_avg32(a, b));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), no_rnd_avg32(a, b));\
    }\
}\
\
static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                         int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a,b;\
        a= AV_RN32(&src1[i*src_stride1  ]);\
        b= AV_RN32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), rnd_avg32(a, b));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), rnd_avg32(a, b));\
    }\
}\
\
static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                         int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a,b;\
        a= AV_RN32(&src1[i*src_stride1  ]);\
        b= AV_RN32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), rnd_avg32(a, b));\
    }\
}\
\
static inline void OPNAME ## _pixels2_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                         int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a,b;\
        a= AV_RN16(&src1[i*src_stride1  ]);\
        b= AV_RN16(&src2[i*src_stride2  ]);\
        OP(*((uint16_t*)&dst[i*dst_stride  ]), rnd_avg32(a, b));\
    }\
}\
\
static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                          int src_stride1, int src_stride2, int h){\
    OPNAME ## _pixels8_l2(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    OPNAME ## _pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                 int src_stride1, int src_stride2, int h){\
    OPNAME ## _no_rnd_pixels8_l2(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    OPNAME ## _no_rnd_pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0=  (a&0x03030303UL)\
           + (b&0x03030303UL)\
           + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0=  (a&0x03030303UL)\
           + (b&0x03030303UL)\
           + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
\
static inline void OPNAME ## _pixels4_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels4_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels4_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels4_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels2_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels2_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _pixels2_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels2_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0=  (a&0x03030303UL)\
           + (b&0x03030303UL)\
           + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0=  (a&0x03030303UL)\
           + (b&0x03030303UL)\
           + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
static inline void OPNAME ## _pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    OPNAME ## _pixels8_l4(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    OPNAME ## _pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
static inline void OPNAME ## _no_rnd_pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    OPNAME ## _no_rnd_pixels8_l4(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    OPNAME ## _no_rnd_pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
\
static inline void OPNAME ## _pixels2_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i, a0, b0, a1, b1;\
    a0= pixels[0];\
    b0= pixels[1] + 2;\
    a0 += b0;\
    b0 += pixels[2];\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        a1= pixels[0];\
        b1= pixels[1];\
        a1 += b1;\
        b1 += pixels[2];\
\
        block[0]= (a1+a0)>>2; /* FIXME non put */\
        block[1]= (b1+b0)>>2;\
\
        pixels+=line_size;\
        block +=line_size;\
\
        a0= pixels[0];\
        b0= pixels[1] + 2;\
        a0 += b0;\
        b0 += pixels[2];\
\
        block[0]= (a1+a0)>>2;\
        block[1]= (b1+b0)>>2;\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void OPNAME ## _pixels4_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    const uint32_t a= AV_RN32(pixels  );\
    const uint32_t b= AV_RN32(pixels+1);\
    uint32_t l0=  (a&0x03030303UL)\
                + (b&0x03030303UL)\
                + 0x02020202UL;\
    uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
               + ((b&0xFCFCFCFCUL)>>2);\
    uint32_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint32_t a= AV_RN32(pixels  );\
        uint32_t b= AV_RN32(pixels+1);\
        l1=  (a&0x03030303UL)\
           + (b&0x03030303UL);\
        h1= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN32(pixels  );\
        b= AV_RN32(pixels+1);\
        l0=  (a&0x03030303UL)\
           + (b&0x03030303UL)\
           + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void OPNAME ## _pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0=  (a&0x03030303UL)\
                    + (b&0x03030303UL)\
                    + 0x02020202UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1=  (a&0x03030303UL)\
               + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0=  (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x02020202UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
static inline void OPNAME ## _no_rnd_pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0=  (a&0x03030303UL)\
                    + (b&0x03030303UL)\
                    + 0x01010101UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1=  (a&0x03030303UL)\
               + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0=  (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x01010101UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
CALL_2X_PIXELS(OPNAME ## _pixels16_c    , OPNAME ## _pixels8_c    , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels8_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels8_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels8_xy2_c, 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_c    , OPNAME ## _pixels8_c        , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels8_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels8_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels8_xy2_c, 8)\

#define op_avg(a, b) a = rnd_avg32(a, b)
#endif
#define op_put(a, b) a = b

PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put

#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
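
/*
 * A note on rnd_avg32 (used through op_avg above): for packed bytes, the
 * identity a + b == 2*(a|b) - (a^b) gives, per byte,
 *
 *     (a + b + 1) >> 1  ==  (a | b) - ((a ^ b) >> 1)
 *
 * and masking the xor with 0xFEFEFEFE before the shift keeps a bit of one
 * byte from leaking into its neighbour, so four (or eight) samples are
 * averaged in a single integer operation with no overflow. no_rnd_avg32 is
 * the same trick built on (a & b) + (((a ^ b) & 0xFEFEFEFE) >> 1), which
 * rounds down instead of up.
 */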
static void put_no_rnd_pixels16_l2_c(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    put_no_rnd_pixels16_l2(dst, a, b, stride, stride, stride, h);
}

static void put_no_rnd_pixels8_l2_c(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
    put_no_rnd_pixels8_l2(dst, a, b, stride, stride, stride, h);
}
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
{
    const int A=(16-x16)*(16-y16);
    const int B=(   x16)*(16-y16);
    const int C=(16-x16)*(   y16);
    const int D=(   x16)*(   y16);
    int i;

    for(i=0; i<h; i++)
    {
        dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
        dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
        dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
        dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
        dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
        dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
        dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
        dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
        dst+= stride;
        src+= stride;
    }
}
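
/*
 * The four weights above are the standard bilinear ones for a 1/16-pel
 * fractional offset (x16, y16):
 *
 *     A + B + C + D = (16-x16)*(16-y16) + x16*(16-y16)
 *                   + (16-x16)*y16      + x16*y16       = 16*16 = 256
 *
 * so after adding the rounder, the >>8 renormalizes the interpolated sample
 * back to 8 bits.
 */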
void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
              int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
    int y, vx, vy;
    const int s= 1<<shift;

    width--;
    height--;

    for(y=0; y<h; y++){
        int x;

        vx= ox;
        vy= oy;
        for(x=0; x<8; x++){ //XXX FIXME optimize
            int src_x, src_y, frac_x, frac_y, index;

            src_x= vx>>16;
            src_y= vy>>16;
            frac_x= src_x&(s-1);
            frac_y= src_y&(s-1);
            src_x>>=shift;
            src_y>>=shift;

            if((unsigned)src_x < width){
                if((unsigned)src_y < height){
                    index= src_x + src_y*stride;
                    dst[y*stride + x]= (  (  src[index         ]*(s-frac_x)
                                           + src[index       +1]*   frac_x )*(s-frac_y)
                                        + (  src[index+stride  ]*(s-frac_x)
                                           + src[index+stride+1]*   frac_x )*   frac_y
                                        + r)>>(shift*2);
                }else{
                    index= src_x + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= (  (  src[index  ]*(s-frac_x)
                                           + src[index+1]*   frac_x )*s
                                        + r)>>(shift*2);
                }
            }else{
                if((unsigned)src_y < height){
                    index= av_clip(src_x, 0, width) + src_y*stride;
                    dst[y*stride + x]= (  (  src[index       ]*(s-frac_y)
                                           + src[index+stride]*   frac_y )*s
                                        + r)>>(shift*2);
                }else{
                    index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= src[index];
                }
            }

            vx+= dxx;
            vy+= dyx;
        }
        ox += dxy;
        oy += dyy;
    }
}
static inline void put_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    switch(width){
    case 2: put_pixels2_c (dst, src, stride, height); break;
    case 4: put_pixels4_c (dst, src, stride, height); break;
    case 8: put_pixels8_c (dst, src, stride, height); break;
    case 16:put_pixels16_c(dst, src, stride, height); break;
    }
}

static inline void put_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (683*(2*src[j] + src[j+1] + 1)) >> 11;
        }
        src += stride;
        dst += stride;
    }
}
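
/*
 * The magic constants in these tpel (third-pel) filters are fixed-point
 * reciprocals: 683 ~= 2^11/3 and 2731 ~= 2^15/12, so e.g.
 *
 *     (683*(2*src[j] + src[j+1] + 1)) >> 11
 *
 * matches (2*a + b + 1)/3 for all 8-bit inputs without a division, and the
 * two-dimensional variants below weight the four neighbours in twelfths the
 * same way.
 */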
static inline void put_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (683*(src[j] + 2*src[j+1] + 1)) >> 11;
        }
        src += stride;
        dst += stride;
    }
}

static inline void put_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (683*(2*src[j] + src[j+stride] + 1)) >> 11;
        }
        src += stride;
        dst += stride;
    }
}

static inline void put_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15;
        }
        src += stride;
        dst += stride;
    }
}

static inline void put_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;
        }
        src += stride;
        dst += stride;
    }
}

static inline void put_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (683*(src[j] + 2*src[j+stride] + 1)) >> 11;
        }
        src += stride;
        dst += stride;
    }
}

static inline void put_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;
        }
        src += stride;
        dst += stride;
    }
}

static inline void put_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15;
        }
        src += stride;
        dst += stride;
    }
}

static inline void avg_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    switch(width){
    case 2: avg_pixels2_c (dst, src, stride, height); break;
    case 4: avg_pixels4_c (dst, src, stride, height); break;
    case 8: avg_pixels8_c (dst, src, stride, height); break;
    case 16:avg_pixels16_c(dst, src, stride, height); break;
    }
}

static inline void avg_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((683*(2*src[j] + src[j+1] + 1)) >> 11) + 1) >> 1;
        }
        src += stride;
        dst += stride;
    }
}

static inline void avg_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+1] + 1)) >> 11) + 1) >> 1;
        }
        src += stride;
        dst += stride;
    }
}

static inline void avg_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((683*(2*src[j] + src[j+stride] + 1)) >> 11) + 1) >> 1;
        }
        src += stride;
        dst += stride;
    }
}

static inline void avg_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
        }
        src += stride;
        dst += stride;
    }
}

static inline void avg_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
        }
        src += stride;
        dst += stride;
    }
}

static inline void avg_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+stride] + 1)) >> 11) + 1) >> 1;
        }
        src += stride;
        dst += stride;
    }
}

static inline void avg_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
        }
        src += stride;
        dst += stride;
    }
}

static inline void avg_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
    int i,j;
    for (i=0; i < height; i++) {
        for (j=0; j < width; j++) {
            dst[j] = (dst[j] + ((2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
        }
        src += stride;
        dst += stride;
    }
}
#if 0
#define TPEL_WIDTH(width)\
static void put_tpel_pixels ## width ## _mc00_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc00_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc10_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc10_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc20_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc20_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc01_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc01_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc11_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc11_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc21_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc21_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc02_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc02_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc12_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc12_c(dst, src, stride, width, height);}\
static void put_tpel_pixels ## width ## _mc22_c(uint8_t *dst, const uint8_t *src, int stride, int height){\
    put_tpel_pixels_mc22_c(dst, src, stride, width, height);}
#endif
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    int i;\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    int i;\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=( x)*(8-y);\
    const int C=(8-x)*( y);\
    const int D=( x)*( y);\
    int i;\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            OP(dst[4], (A*src[4] + E*src[step+4]));\
            OP(dst[5], (A*src[5] + E*src[step+5]));\
            OP(dst[6], (A*src[6] + E*src[step+6]));\
            OP(dst[7], (A*src[7] + E*src[step+7]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}

#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)

H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
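
/* VC-1 uses the same bilinear chroma interpolation, but with "no rounding":
 * the bias is 32-4 = 28 instead of 32, so exact halves are pulled slightly
 * downwards.  The avg_ variant additionally averages the result with the
 * existing destination through avg2(). */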
static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    const int A=(8-x)*(8-y);
    const int B=( x)*(8-y);
    const int C=(8-x)*( y);
    const int D=( x)*( y);
    int i;

    assert(x<8 && y<8 && x>=0 && y>=0);

    for(i=0; i<h; i++)
    {
        dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6;
        dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6;
        dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6;
        dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6;
        dst[4] = (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6;
        dst[5] = (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6;
        dst[6] = (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6;
        dst[7] = (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6;
        dst+= stride;
        src+= stride;
    }
}

static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    const int A=(8-x)*(8-y);
    const int B=( x)*(8-y);
    const int C=(8-x)*( y);
    const int D=( x)*( y);
    int i;

    assert(x<8 && y<8 && x>=0 && y>=0);

    for(i=0; i<h; i++)
    {
        dst[0] = avg2(dst[0], ((A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6));
        dst[1] = avg2(dst[1], ((A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6));
        dst[2] = avg2(dst[2], ((A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6));
        dst[3] = avg2(dst[3], ((A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6));
        dst[4] = avg2(dst[4], ((A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6));
        dst[5] = avg2(dst[5], ((A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6));
        dst[6] = avg2(dst[6], ((A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6));
        dst[7] = avg2(dst[7], ((A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6));
        dst+= stride;
        src+= stride;
    }
}
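
/* QPEL_MC expands to the MPEG-4 quarter-pel kernels.  Half-pel planes come
 * from the 8-tap filter (-1, 3, -6, 20, 20, -6, 3, -1), normalized by 32
 * (OP rounds with +16>>5, the _no_rnd_ variants with +15>>5); note how the
 * outermost taps are mirrored at the block edges instead of reading past
 * them.  Quarter-pel positions are then formed by averaging the relevant
 * planes with pixels8_l2()/pixels8_l4().  The non-static ff_*_old_c
 * variants appear to keep the earlier four-plane averaging scheme. */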
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[0]+src[2])*6 + (src[1]+src[3])*3 - (src[2]+src[4]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0]+src[3])*6 + (src[0]+src[4])*3 - (src[1]+src[5]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1]+src[4])*6 + (src[0]+src[5])*3 - (src[0]+src[6]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2]+src[5])*6 + (src[1]+src[6])*3 - (src[0]+src[7]));\
        OP(dst[4], (src[4]+src[5])*20 - (src[3]+src[6])*6 + (src[2]+src[7])*3 - (src[1]+src[8]));\
        OP(dst[5], (src[5]+src[6])*20 - (src[4]+src[7])*6 + (src[3]+src[8])*3 - (src[2]+src[8]));\
        OP(dst[6], (src[6]+src[7])*20 - (src[5]+src[8])*6 + (src[4]+src[8])*3 - (src[3]+src[7]));\
        OP(dst[7], (src[7]+src[8])*20 - (src[6]+src[8])*6 + (src[5]+src[7])*3 - (src[4]+src[6]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int w=8;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int src0= src[0*srcStride];\
        const int src1= src[1*srcStride];\
        const int src2= src[2*srcStride];\
        const int src3= src[3*srcStride];\
        const int src4= src[4*srcStride];\
        const int src5= src[5*srcStride];\
        const int src6= src[6*srcStride];\
        const int src7= src[7*srcStride];\
        const int src8= src[8*srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
        OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
        OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
        OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
        OP(dst[7*dstStride], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    \
    for(i=0; i<h; i++)\
    {\
        OP(dst[ 0], (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]));\
        OP(dst[ 1], (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]));\
        OP(dst[ 2], (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]));\
        OP(dst[ 3], (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]));\
        OP(dst[ 4], (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]));\
        OP(dst[ 5], (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]));\
        OP(dst[ 6], (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]));\
        OP(dst[ 7], (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]));\
        OP(dst[ 8], (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]));\
        OP(dst[ 9], (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]));\
        OP(dst[10], (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]));\
        OP(dst[11], (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]));\
        OP(dst[12], (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]));\
        OP(dst[13], (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]));\
        OP(dst[14], (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]));\
        OP(dst[15], (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    const int w=16;\
    for(i=0; i<w; i++)\
    {\
        const int src0= src[0*srcStride];\
        const int src1= src[1*srcStride];\
        const int src2= src[2*srcStride];\
        const int src3= src[3*srcStride];\
        const int src4= src[4*srcStride];\
        const int src5= src[5*srcStride];\
        const int src6= src[6*srcStride];\
        const int src7= src[7*srcStride];\
        const int src8= src[8*srcStride];\
        const int src9= src[9*srcStride];\
        const int src10= src[10*srcStride];\
        const int src11= src[11*srcStride];\
        const int src12= src[12*srcStride];\
        const int src13= src[13*srcStride];\
        const int src14= src[14*srcStride];\
        const int src15= src[15*srcStride];\
        const int src16= src[16*srcStride];\
        OP(dst[ 0*dstStride], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
        OP(dst[ 1*dstStride], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
        OP(dst[ 2*dstStride], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
        OP(dst[ 3*dstStride], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
        OP(dst[ 4*dstStride], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
        OP(dst[ 5*dstStride], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
        OP(dst[ 6*dstStride], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
        OP(dst[ 7*dstStride], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
        OP(dst[ 8*dstStride], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
        OP(dst[ 9*dstStride], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
        OP(dst[10*dstStride], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
        OP(dst[11*dstStride], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
        OP(dst[12*dstStride], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
        OP(dst[13*dstStride], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
        OP(dst[14*dstStride], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
        OP(dst[15*dstStride], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_c(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2(dst, src, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2(dst, src+1, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2(dst, full, half, stride, 16, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    copy_block9(full, src, 16, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
}\
\
static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2(dst, full+16, half, stride, 16, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_c(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2(dst, src, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2(dst, src+1, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2(dst, full, half, stride, 24, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    copy_block17(full, src, 24, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
}\
\
static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2(dst, full+24, half, stride, 24, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[272];\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}
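
/* cm points into the clipping table, so op_put amounts to
 * av_clip_uint8((b + 16) >> 5), i.e. division by 32 with round-to-nearest;
 * the no_rnd variants bias by 15 so exact halves round down, which matches
 * the MPEG-4 rounding-control semantics. */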
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]

QPEL_MC(0, put_ , _ , op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_ , _ , op_avg)
//QPEL_MC(1, avg_no_rnd , _ , op_avg)
#undef op_avg
#undef op_avg_no_rnd
#undef op_put
#undef op_put_no_rnd

#if 1
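/* H264_LOWPASS generates the H.264 luma half-pel kernels from the standard
 * 6-tap filter (1, -5, 20, 20, -5, 1).  The plain h/v passes normalize by
 * 32 (OP: +16>>5); the hv pass keeps the first stage unclipped in a 16-bit
 * tmp buffer and normalizes the second stage by 1024 (OP2: +512>>10). */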
#define H264_LOWPASS(OPNAME, OP, OP2) \
static av_unused void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int h=2;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static av_unused void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int w=2;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        dst++;\
        src++;\
    }\
}\
\
static av_unused void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    const int h=2;\
    const int w=2;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]);\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]);\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride];\
        const int tmpA= tmp[-1*tmpStride];\
        const int tmp0= tmp[0 *tmpStride];\
        const int tmp1= tmp[1 *tmpStride];\
        const int tmp2= tmp[2 *tmpStride];\
        const int tmp3= tmp[3 *tmpStride];\
        const int tmp4= tmp[4 *tmpStride];\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        dst++;\
        tmp++;\
    }\
}\
static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int h=4;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int w=4;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    const int h=4;\
    const int w=4;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]);\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]);\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]);\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]);\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride];\
        const int tmpA= tmp[-1*tmpStride];\
        const int tmp0= tmp[0 *tmpStride];\
        const int tmp1= tmp[1 *tmpStride];\
        const int tmp2= tmp[2 *tmpStride];\
        const int tmp3= tmp[3 *tmpStride];\
        const int tmp4= tmp[4 *tmpStride];\
        const int tmp5= tmp[5 *tmpStride];\
        const int tmp6= tmp[6 *tmpStride];\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        dst++;\
        tmp++;\
    }\
}\
\
static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int h=8;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
        OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
        OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
        OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
        OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    const int w=8;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        const int src7= src[7 *srcStride];\
        const int src8= src[8 *srcStride];\
        const int src9= src[9 *srcStride];\
        const int src10=src[10*srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
        OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
        OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
        OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    const int h=8;\
    const int w=8;\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]);\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]);\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]);\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]);\
        tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]);\
        tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]);\
        tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]);\
        tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]);\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride];\
        const int tmpA= tmp[-1*tmpStride];\
        const int tmp0= tmp[0 *tmpStride];\
        const int tmp1= tmp[1 *tmpStride];\
        const int tmp2= tmp[2 *tmpStride];\
        const int tmp3= tmp[3 *tmpStride];\
        const int tmp4= tmp[4 *tmpStride];\
        const int tmp5= tmp[5 *tmpStride];\
        const int tmp6= tmp[6 *tmpStride];\
        const int tmp7= tmp[7 *tmpStride];\
        const int tmp8= tmp[8 *tmpStride];\
        const int tmp9= tmp[9 *tmpStride];\
        const int tmp10=tmp[10*tmpStride];\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
        OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
        OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
        OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
        dst++;\
        tmp++;\
    }\
}\
\
static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_v_lowpass(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_v_lowpass(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8_hv_lowpass(dst , tmp , src , dstStride, tmpStride, srcStride);\
    OPNAME ## h264_qpel8_hv_lowpass(dst+8, tmp+8, src+8, dstStride, tmpStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_hv_lowpass(dst , tmp , src , dstStride, tmpStride, srcStride);\
    OPNAME ## h264_qpel8_hv_lowpass(dst+8, tmp+8, src+8, dstStride, tmpStride, srcStride);\
}\
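
/* H264_MC instantiates the 16 quarter-pel positions _mcXY for one block
 * size, where X and Y are the quarter-pel fractions of the motion vector.
 * Sub-positions are built by averaging two planes with pixels##SIZE##_l2():
 * e.g. _mc10 blends the integer samples with the horizontal half-pel plane,
 * _mc11 blends the horizontal and vertical half-pel planes, and _mc22 is
 * the pure hv (centre) position computed directly by the hv lowpass. */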
#define H264_MC(OPNAME, SIZE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, src, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, src+1, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t half[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t half[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfH[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfV[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid= full + SIZE*2;\
    int16_t tmp[SIZE*(SIZE+5)];\
    uint8_t halfV[SIZE*SIZE];\
    uint8_t halfHV[SIZE*SIZE];\
    copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}\

#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
#define op2_put(a, b) a = cm[((b) + 512)>>10]

H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 2)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)

#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
#endif
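
/* WMV2 "mspel" half-pel interpolation: a 4-tap filter (-1, 9, 9, -1),
 * rounded with +8 and normalized by 16.  The put_mspel8_mc* functions
 * below combine the horizontal and vertical passes in the same way as
 * the qpel code above. */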
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int i;

    for(i=0; i<h; i++){
        dst[0]= cm[(9*(src[0] + src[1]) - (src[-1] + src[2]) + 8)>>4];
        dst[1]= cm[(9*(src[1] + src[2]) - (src[ 0] + src[3]) + 8)>>4];
        dst[2]= cm[(9*(src[2] + src[3]) - (src[ 1] + src[4]) + 8)>>4];
        dst[3]= cm[(9*(src[3] + src[4]) - (src[ 2] + src[5]) + 8)>>4];
        dst[4]= cm[(9*(src[4] + src[5]) - (src[ 3] + src[6]) + 8)>>4];
        dst[5]= cm[(9*(src[5] + src[6]) - (src[ 4] + src[7]) + 8)>>4];
        dst[6]= cm[(9*(src[6] + src[7]) - (src[ 5] + src[8]) + 8)>>4];
        dst[7]= cm[(9*(src[7] + src[8]) - (src[ 6] + src[9]) + 8)>>4];
        dst+=dstStride;
        src+=srcStride;
    }
}

#if CONFIG_CAVS_DECODER
/* AVS specific */
void ff_put_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_c(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_c(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_c(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_c(dst, src, stride, 16);
}
#endif /* CONFIG_CAVS_DECODER */

#if CONFIG_VC1_DECODER
/* VC-1 specific */
void ff_put_vc1_mspel_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_c(dst, src, stride, 8);
}
void ff_avg_vc1_mspel_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    avg_pixels8_c(dst, src, stride, 8);
}
#endif /* CONFIG_VC1_DECODER */

#if CONFIG_RV40_DECODER
static void put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
    put_pixels16_xy2_c(dst, src, stride, 16);
}
static void avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
    avg_pixels16_xy2_c(dst, src, stride, 16);
}
static void put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
    put_pixels8_xy2_c(dst, src, stride, 8);
}
static void avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
    avg_pixels8_xy2_c(dst, src, stride, 8);
}
#endif /* CONFIG_RV40_DECODER */

static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int i;

    for(i=0; i<w; i++){
        const int src_1= src[ -srcStride];
        const int src0 = src[0 ];
        const int src1 = src[ srcStride];
        const int src2 = src[2*srcStride];
        const int src3 = src[3*srcStride];
        const int src4 = src[4*srcStride];
        const int src5 = src[5*srcStride];
        const int src6 = src[6*srcStride];
        const int src7 = src[7*srcStride];
        const int src8 = src[8*srcStride];
        const int src9 = src[9*srcStride];
        dst[0*dstStride]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
        dst[1*dstStride]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
        dst[2*dstStride]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
        dst[3*dstStride]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
        dst[4*dstStride]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
        dst[5*dstStride]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
        dst[6*dstStride]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
        dst[7*dstStride]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
        src++;
        dst++;
    }
}

static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
    put_pixels8_c(dst, src, stride, 8);
}

static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t half[64];
    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2(dst, src, half, stride, stride, 8, 8);
}

static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t half[64];
    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2(dst, src+1, half, stride, stride, 8, 8);
}

static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];
    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
    put_pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];
    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
    put_pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
    uint8_t halfH[88];
    wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
}
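
/* H.263 Annex J deblocking.  d measures the step across the block edge from
 * the four straddling pixels; it is folded through the qscale-dependent
 * strength ramp to get d1, which is applied to the inner pair p1/p2, while
 * the outer pair p0/p3 receives the smaller clipped correction d2.  The
 * (p&256) test is a branch-cheap clamp: after the correction any value
 * outside 0..255 either is negative or has bit 8 set, and ~(p>>31) maps
 * negatives to 0 and overflows to 255. */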
  2596. static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){
  2597. if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  2598. int x;
  2599. const int strength= ff_h263_loop_filter_strength[qscale];
  2600. for(x=0; x<8; x++){
  2601. int d1, d2, ad1;
  2602. int p0= src[x-2*stride];
  2603. int p1= src[x-1*stride];
  2604. int p2= src[x+0*stride];
  2605. int p3= src[x+1*stride];
  2606. int d = (p0 - p3 + 4*(p2 - p1)) / 8;
  2607. if (d<-2*strength) d1= 0;
  2608. else if(d<- strength) d1=-2*strength - d;
  2609. else if(d< strength) d1= d;
  2610. else if(d< 2*strength) d1= 2*strength - d;
  2611. else d1= 0;
  2612. p1 += d1;
  2613. p2 -= d1;
  2614. if(p1&256) p1= ~(p1>>31);
  2615. if(p2&256) p2= ~(p2>>31);
  2616. src[x-1*stride] = p1;
  2617. src[x+0*stride] = p2;
  2618. ad1= FFABS(d1)>>1;
  2619. d2= av_clip((p0-p3)/4, -ad1, ad1);
  2620. src[x-2*stride] = p0 - d2;
  2621. src[x+ stride] = p3 + d2;
  2622. }
  2623. }
  2624. }
  2625. static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
  2626. if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  2627. int y;
  2628. const int strength= ff_h263_loop_filter_strength[qscale];
  2629. for(y=0; y<8; y++){
  2630. int d1, d2, ad1;
  2631. int p0= src[y*stride-2];
  2632. int p1= src[y*stride-1];
  2633. int p2= src[y*stride+0];
  2634. int p3= src[y*stride+1];
  2635. int d = (p0 - p3 + 4*(p2 - p1)) / 8;
  2636. if (d<-2*strength) d1= 0;
  2637. else if(d<- strength) d1=-2*strength - d;
  2638. else if(d< strength) d1= d;
  2639. else if(d< 2*strength) d1= 2*strength - d;
  2640. else d1= 0;
  2641. p1 += d1;
  2642. p2 -= d1;
  2643. if(p1&256) p1= ~(p1>>31);
  2644. if(p2&256) p2= ~(p2>>31);
  2645. src[y*stride-1] = p1;
  2646. src[y*stride+0] = p2;
  2647. ad1= FFABS(d1)>>1;
  2648. d2= av_clip((p0-p3)/4, -ad1, ad1);
  2649. src[y*stride-2] = p0 - d2;
  2650. src[y*stride+1] = p3 + d2;
  2651. }
  2652. }
  2653. }
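/* H.261 in-loop filter: separable 3-tap [1 2 1]/4 smoothing. The
   vertical pass fills temp[] (border rows are passed through, scaled by
   4 to keep a common fixed-point scale), then the horizontal pass
   writes back with rounding; border columns likewise stay unfiltered. */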
  2654. static void h261_loop_filter_c(uint8_t *src, int stride){
  2655. int x,y,xy,yz;
  2656. int temp[64];
  2657. for(x=0; x<8; x++){
  2658. temp[x ] = 4*src[x ];
  2659. temp[x + 7*8] = 4*src[x + 7*stride];
  2660. }
  2661. for(y=1; y<7; y++){
  2662. for(x=0; x<8; x++){
  2663. xy = y * stride + x;
  2664. yz = y * 8 + x;
  2665. temp[yz] = src[xy - stride] + 2*src[xy] + src[xy + stride];
  2666. }
  2667. }
  2668. for(y=0; y<8; y++){
  2669. src[ y*stride] = (temp[ y*8] + 2)>>2;
  2670. src[7+y*stride] = (temp[7+y*8] + 2)>>2;
  2671. for(x=1; x<7; x++){
  2672. xy = y * stride + x;
  2673. yz = y * 8 + x;
  2674. src[xy] = (temp[yz-1] + 2*temp[yz] + temp[yz+1] + 8)>>4;
  2675. }
  2676. }
  2677. }
  2678. static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  2679. {
  2680. int s, i;
  2681. s = 0;
  2682. for(i=0;i<h;i++) {
  2683. s += abs(pix1[0] - pix2[0]);
  2684. s += abs(pix1[1] - pix2[1]);
  2685. s += abs(pix1[2] - pix2[2]);
  2686. s += abs(pix1[3] - pix2[3]);
  2687. s += abs(pix1[4] - pix2[4]);
  2688. s += abs(pix1[5] - pix2[5]);
  2689. s += abs(pix1[6] - pix2[6]);
  2690. s += abs(pix1[7] - pix2[7]);
  2691. s += abs(pix1[8] - pix2[8]);
  2692. s += abs(pix1[9] - pix2[9]);
  2693. s += abs(pix1[10] - pix2[10]);
  2694. s += abs(pix1[11] - pix2[11]);
  2695. s += abs(pix1[12] - pix2[12]);
  2696. s += abs(pix1[13] - pix2[13]);
  2697. s += abs(pix1[14] - pix2[14]);
  2698. s += abs(pix1[15] - pix2[15]);
  2699. pix1 += line_size;
  2700. pix2 += line_size;
  2701. }
  2702. return s;
  2703. }
  2704. static int pix_abs16_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  2705. {
  2706. int s, i;
  2707. s = 0;
  2708. for(i=0;i<h;i++) {
  2709. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  2710. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  2711. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  2712. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  2713. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  2714. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  2715. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  2716. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  2717. s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
  2718. s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
  2719. s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
  2720. s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
  2721. s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
  2722. s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
  2723. s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
  2724. s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
  2725. pix1 += line_size;
  2726. pix2 += line_size;
  2727. }
  2728. return s;
  2729. }
  2730. static int pix_abs16_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  2731. {
  2732. int s, i;
  2733. uint8_t *pix3 = pix2 + line_size;
  2734. s = 0;
  2735. for(i=0;i<h;i++) {
  2736. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  2737. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  2738. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  2739. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  2740. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  2741. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  2742. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  2743. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  2744. s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
  2745. s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
  2746. s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
  2747. s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
  2748. s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
  2749. s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
  2750. s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
  2751. s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
  2752. pix1 += line_size;
  2753. pix2 += line_size;
  2754. pix3 += line_size;
  2755. }
  2756. return s;
  2757. }
  2758. static int pix_abs16_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  2759. {
  2760. int s, i;
  2761. uint8_t *pix3 = pix2 + line_size;
  2762. s = 0;
  2763. for(i=0;i<h;i++) {
  2764. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  2765. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  2766. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  2767. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  2768. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  2769. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  2770. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  2771. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  2772. s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
  2773. s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
  2774. s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
  2775. s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
  2776. s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
  2777. s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
  2778. s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
  2779. s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
  2780. pix1 += line_size;
  2781. pix2 += line_size;
  2782. pix3 += line_size;
  2783. }
  2784. return s;
  2785. }
  2786. static inline int pix_abs8_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  2787. {
  2788. int s, i;
  2789. s = 0;
  2790. for(i=0;i<h;i++) {
  2791. s += abs(pix1[0] - pix2[0]);
  2792. s += abs(pix1[1] - pix2[1]);
  2793. s += abs(pix1[2] - pix2[2]);
  2794. s += abs(pix1[3] - pix2[3]);
  2795. s += abs(pix1[4] - pix2[4]);
  2796. s += abs(pix1[5] - pix2[5]);
  2797. s += abs(pix1[6] - pix2[6]);
  2798. s += abs(pix1[7] - pix2[7]);
  2799. pix1 += line_size;
  2800. pix2 += line_size;
  2801. }
  2802. return s;
  2803. }
  2804. static int pix_abs8_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  2805. {
  2806. int s, i;
  2807. s = 0;
  2808. for(i=0;i<h;i++) {
  2809. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  2810. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  2811. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  2812. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  2813. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  2814. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  2815. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  2816. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  2817. pix1 += line_size;
  2818. pix2 += line_size;
  2819. }
  2820. return s;
  2821. }
  2822. static int pix_abs8_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  2823. {
  2824. int s, i;
  2825. uint8_t *pix3 = pix2 + line_size;
  2826. s = 0;
  2827. for(i=0;i<h;i++) {
  2828. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  2829. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  2830. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  2831. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  2832. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  2833. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  2834. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  2835. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  2836. pix1 += line_size;
  2837. pix2 += line_size;
  2838. pix3 += line_size;
  2839. }
  2840. return s;
  2841. }
  2842. static int pix_abs8_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  2843. {
  2844. int s, i;
  2845. uint8_t *pix3 = pix2 + line_size;
  2846. s = 0;
  2847. for(i=0;i<h;i++) {
  2848. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  2849. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  2850. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  2851. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  2852. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  2853. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  2854. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  2855. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  2856. pix1 += line_size;
  2857. pix2 += line_size;
  2858. pix3 += line_size;
  2859. }
  2860. return s;
  2861. }
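/* Noise-preserving SSE: score1 is the plain SSE, while score2 measures
   how much the local gradient structure differs between the two blocks,
   so an encoder is not rewarded for simply blurring noise away. The
   weight comes from AVCodecContext.nsse_weight (8 if no context is
   available). */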
  2862. static int nsse16_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
  2863. MpegEncContext *c = v;
  2864. int score1=0;
  2865. int score2=0;
  2866. int x,y;
  2867. for(y=0; y<h; y++){
  2868. for(x=0; x<16; x++){
  2869. score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
  2870. }
  2871. if(y+1<h){
  2872. for(x=0; x<15; x++){
  2873. score2+= FFABS( s1[x ] - s1[x +stride]
  2874. - s1[x+1] + s1[x+1+stride])
  2875. -FFABS( s2[x ] - s2[x +stride]
  2876. - s2[x+1] + s2[x+1+stride]);
  2877. }
  2878. }
  2879. s1+= stride;
  2880. s2+= stride;
  2881. }
  2882. if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
  2883. else return score1 + FFABS(score2)*8;
  2884. }
  2885. static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
  2886. MpegEncContext *c = v;
  2887. int score1=0;
  2888. int score2=0;
  2889. int x,y;
  2890. for(y=0; y<h; y++){
  2891. for(x=0; x<8; x++){
  2892. score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
  2893. }
  2894. if(y+1<h){
  2895. for(x=0; x<7; x++){
  2896. score2+= FFABS( s1[x ] - s1[x +stride]
  2897. - s1[x+1] + s1[x+1+stride])
  2898. -FFABS( s2[x ] - s2[x +stride]
  2899. - s2[x+1] + s2[x+1+stride]);
  2900. }
  2901. }
  2902. s1+= stride;
  2903. s2+= stride;
  2904. }
  2905. if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
  2906. else return score1 + FFABS(score2)*8;
  2907. }
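/* Helpers for the encoder's rate-distortion quantization refinement:
   try_8x8basis() returns the weighted squared error that would result
   from adding scale times a basis function to the residual, and
   add_8x8basis() actually applies it. The (1<<(BASIS_SHIFT-RECON_SHIFT-1))
   term rounds the fixed-point product to nearest before shifting. */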
  2908. static int try_8x8basis_c(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
  2909. int i;
  2910. unsigned int sum=0;
  2911. for(i=0; i<8*8; i++){
  2912. int b= rem[i] + ((basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT));
  2913. int w= weight[i];
  2914. b>>= RECON_SHIFT;
  2915. assert(-512<b && b<512);
  2916. sum += (w*b)*(w*b)>>4;
  2917. }
  2918. return sum>>2;
  2919. }
  2920. static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale){
  2921. int i;
  2922. for(i=0; i<8*8; i++){
  2923. rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
  2924. }
  2925. }
  2926. /**
2927. * Permute an 8x8 block.
2928. * @param block the block which will be permuted according to the given permutation vector
2929. * @param permutation the permutation vector
2930. * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
2931. * @param scantable the scantable in use; only needed to speed the permutation up, the block is not
2932. * (inverse) permuted to scantable order!
  2933. */
  2934. void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last)
  2935. {
  2936. int i;
  2937. DCTELEM temp[64];
  2938. if(last<=0) return;
  2939. //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
  2940. for(i=0; i<=last; i++){
  2941. const int j= scantable[i];
  2942. temp[j]= block[j];
  2943. block[j]=0;
  2944. }
  2945. for(i=0; i<=last; i++){
  2946. const int j= scantable[i];
  2947. const int perm_j= permutation[j];
  2948. block[perm_j]= temp[j];
  2949. }
  2950. }
  2951. static int zero_cmp(void *s, uint8_t *a, uint8_t *b, int stride, int h){
  2952. return 0;
  2953. }
  2954. void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
  2955. int i;
  2956. memset(cmp, 0, sizeof(void*)*6);
  2957. for(i=0; i<6; i++){
  2958. switch(type&0xFF){
  2959. case FF_CMP_SAD:
  2960. cmp[i]= c->sad[i];
  2961. break;
  2962. case FF_CMP_SATD:
  2963. cmp[i]= c->hadamard8_diff[i];
  2964. break;
  2965. case FF_CMP_SSE:
  2966. cmp[i]= c->sse[i];
  2967. break;
  2968. case FF_CMP_DCT:
  2969. cmp[i]= c->dct_sad[i];
  2970. break;
  2971. case FF_CMP_DCT264:
  2972. cmp[i]= c->dct264_sad[i];
  2973. break;
  2974. case FF_CMP_DCTMAX:
  2975. cmp[i]= c->dct_max[i];
  2976. break;
  2977. case FF_CMP_PSNR:
  2978. cmp[i]= c->quant_psnr[i];
  2979. break;
  2980. case FF_CMP_BIT:
  2981. cmp[i]= c->bit[i];
  2982. break;
  2983. case FF_CMP_RD:
  2984. cmp[i]= c->rd[i];
  2985. break;
  2986. case FF_CMP_VSAD:
  2987. cmp[i]= c->vsad[i];
  2988. break;
  2989. case FF_CMP_VSSE:
  2990. cmp[i]= c->vsse[i];
  2991. break;
  2992. case FF_CMP_ZERO:
  2993. cmp[i]= zero_cmp;
  2994. break;
  2995. case FF_CMP_NSSE:
  2996. cmp[i]= c->nsse[i];
  2997. break;
  2998. #if CONFIG_DWT
  2999. case FF_CMP_W53:
  3000. cmp[i]= c->w53[i];
  3001. break;
  3002. case FF_CMP_W97:
  3003. cmp[i]= c->w97[i];
  3004. break;
  3005. #endif
  3006. default:
  3007. av_log(NULL, AV_LOG_ERROR,"internal error in cmp function selection\n");
  3008. }
  3009. }
  3010. }
  3011. static void clear_block_c(DCTELEM *block)
  3012. {
  3013. memset(block, 0, sizeof(DCTELEM)*64);
  3014. }
  3015. /**
  3016. * memset(blocks, 0, sizeof(DCTELEM)*6*64)
  3017. */
  3018. static void clear_blocks_c(DCTELEM *blocks)
  3019. {
  3020. memset(blocks, 0, sizeof(DCTELEM)*6*64);
  3021. }
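/* SWAR (SIMD within a register) byte-wise add: sum the low 7 bits of
   every byte, then restore each byte's MSB from the xor of the
   operands, so a carry can never cross a byte boundary. diff_bytes_c
   below uses the matching borrowless subtraction. */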
  3022. static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
  3023. long i;
  3024. for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
  3025. long a = *(long*)(src+i);
  3026. long b = *(long*)(dst+i);
  3027. *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
  3028. }
  3029. for(; i<w; i++)
  3030. dst[i+0] += src[i+0];
  3031. }
  3032. static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
  3033. long i;
  3034. for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
  3035. long a = *(long*)(src1+i);
  3036. long b = *(long*)(src2+i);
  3037. *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
  3038. }
  3039. for(; i<w; i++)
  3040. dst[i] = src1[i]+src2[i];
  3041. }
  3042. static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
  3043. long i;
  3044. #if !HAVE_FAST_UNALIGNED
  3045. if((long)src2 & (sizeof(long)-1)){
  3046. for(i=0; i+7<w; i+=8){
  3047. dst[i+0] = src1[i+0]-src2[i+0];
  3048. dst[i+1] = src1[i+1]-src2[i+1];
  3049. dst[i+2] = src1[i+2]-src2[i+2];
  3050. dst[i+3] = src1[i+3]-src2[i+3];
  3051. dst[i+4] = src1[i+4]-src2[i+4];
  3052. dst[i+5] = src1[i+5]-src2[i+5];
  3053. dst[i+6] = src1[i+6]-src2[i+6];
  3054. dst[i+7] = src1[i+7]-src2[i+7];
  3055. }
  3056. }else
  3057. #endif
  3058. for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
  3059. long a = *(long*)(src1+i);
  3060. long b = *(long*)(src2+i);
  3061. *(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
  3062. }
  3063. for(; i<w; i++)
  3064. dst[i+0] = src1[i+0]-src2[i+0];
  3065. }
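/* HuffYUV median prediction: each pixel is reconstructed as
   diff + median(left, top, left + top - topleft) -- the MED predictor
   also used by JPEG-LS; src1 points to the previous line. */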
  3066. static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *diff, int w, int *left, int *left_top){
  3067. int i;
  3068. uint8_t l, lt;
  3069. l= *left;
  3070. lt= *left_top;
  3071. for(i=0; i<w; i++){
  3072. l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
  3073. lt= src1[i];
  3074. dst[i]= l;
  3075. }
  3076. *left= l;
  3077. *left_top= lt;
  3078. }
  3079. static void sub_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top){
  3080. int i;
  3081. uint8_t l, lt;
  3082. l= *left;
  3083. lt= *left_top;
  3084. for(i=0; i<w; i++){
  3085. const int pred= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF);
  3086. lt= src1[i];
  3087. l= src2[i];
  3088. dst[i]= l - pred;
  3089. }
  3090. *left= l;
  3091. *left_top= lt;
  3092. }
  3093. static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src, int w, int acc){
  3094. int i;
  3095. for(i=0; i<w-1; i++){
  3096. acc+= src[i];
  3097. dst[i]= acc;
  3098. i++;
  3099. acc+= src[i];
  3100. dst[i]= acc;
  3101. }
  3102. for(; i<w; i++){
  3103. acc+= src[i];
  3104. dst[i]= acc;
  3105. }
  3106. return acc;
  3107. }
  3108. #if HAVE_BIGENDIAN
  3109. #define B 3
  3110. #define G 2
  3111. #define R 1
  3112. #define A 0
  3113. #else
  3114. #define B 0
  3115. #define G 1
  3116. #define R 2
  3117. #define A 3
  3118. #endif
  3119. static void add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
  3120. int i;
  3121. int r,g,b,a;
  3122. r= *red;
  3123. g= *green;
  3124. b= *blue;
  3125. a= *alpha;
  3126. for(i=0; i<w; i++){
  3127. b+= src[4*i+B];
  3128. g+= src[4*i+G];
  3129. r+= src[4*i+R];
  3130. a+= src[4*i+A];
  3131. dst[4*i+B]= b;
  3132. dst[4*i+G]= g;
  3133. dst[4*i+R]= r;
  3134. dst[4*i+A]= a;
  3135. }
  3136. *red= r;
  3137. *green= g;
  3138. *blue= b;
  3139. *alpha= a;
  3140. }
  3141. #undef B
  3142. #undef G
  3143. #undef R
  3144. #undef A
  3145. #define BUTTERFLY2(o1,o2,i1,i2) \
  3146. o1= (i1)+(i2);\
  3147. o2= (i1)-(i2);
  3148. #define BUTTERFLY1(x,y) \
  3149. {\
  3150. int a,b;\
  3151. a= x;\
  3152. b= y;\
  3153. x= a+b;\
  3154. y= a-b;\
  3155. }
  3156. #define BUTTERFLYA(x,y) (FFABS((x)+(y)) + FFABS((x)-(y)))
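/* 8x8 Hadamard SATD: the two loops below perform a 2-D Walsh-Hadamard
   transform of the difference block via three butterfly stages per
   dimension; the score is the sum of absolute transformed
   coefficients. */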
  3157. static int hadamard8_diff8x8_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
  3158. int i;
  3159. int temp[64];
  3160. int sum=0;
  3161. assert(h==8);
  3162. for(i=0; i<8; i++){
  3163. //FIXME try pointer walks
  3164. BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0]-dst[stride*i+0],src[stride*i+1]-dst[stride*i+1]);
  3165. BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2]-dst[stride*i+2],src[stride*i+3]-dst[stride*i+3]);
  3166. BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4]-dst[stride*i+4],src[stride*i+5]-dst[stride*i+5]);
  3167. BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6]-dst[stride*i+6],src[stride*i+7]-dst[stride*i+7]);
  3168. BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
  3169. BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
  3170. BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
  3171. BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
  3172. BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
  3173. BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
  3174. BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
  3175. BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
  3176. }
  3177. for(i=0; i<8; i++){
  3178. BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
  3179. BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
  3180. BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
  3181. BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
  3182. BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
  3183. BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
  3184. BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
  3185. BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
  3186. sum +=
  3187. BUTTERFLYA(temp[8*0+i], temp[8*4+i])
  3188. +BUTTERFLYA(temp[8*1+i], temp[8*5+i])
  3189. +BUTTERFLYA(temp[8*2+i], temp[8*6+i])
  3190. +BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
  3191. }
  3192. #if 0
  3193. static int maxi=0;
  3194. if(sum>maxi){
  3195. maxi=sum;
  3196. printf("MAX:%d\n", maxi);
  3197. }
  3198. #endif
  3199. return sum;
  3200. }
  3201. static int hadamard8_intra8x8_c(/*MpegEncContext*/ void *s, uint8_t *src, uint8_t *dummy, int stride, int h){
  3202. int i;
  3203. int temp[64];
  3204. int sum=0;
  3205. assert(h==8);
  3206. for(i=0; i<8; i++){
  3207. //FIXME try pointer walks
  3208. BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0],src[stride*i+1]);
  3209. BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2],src[stride*i+3]);
  3210. BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4],src[stride*i+5]);
  3211. BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6],src[stride*i+7]);
  3212. BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
  3213. BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
  3214. BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
  3215. BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
  3216. BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
  3217. BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
  3218. BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
  3219. BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
  3220. }
  3221. for(i=0; i<8; i++){
  3222. BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
  3223. BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
  3224. BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
  3225. BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
  3226. BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
  3227. BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
  3228. BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
  3229. BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
  3230. sum +=
  3231. BUTTERFLYA(temp[8*0+i], temp[8*4+i])
  3232. +BUTTERFLYA(temp[8*1+i], temp[8*5+i])
  3233. +BUTTERFLYA(temp[8*2+i], temp[8*6+i])
  3234. +BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
  3235. }
  3236. sum -= FFABS(temp[8*0] + temp[8*4]); // -mean
  3237. return sum;
  3238. }
  3239. static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
  3240. MpegEncContext * const s= (MpegEncContext *)c;
  3241. LOCAL_ALIGNED_16(DCTELEM, temp, [64]);
  3242. assert(h==8);
  3243. s->dsp.diff_pixels(temp, src1, src2, stride);
  3244. s->dsp.fdct(temp);
  3245. return s->dsp.sum_abs_dctelem(temp);
  3246. }
  3247. #if CONFIG_GPL
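/* One 1-D stage of the 8x8 integer transform of H.264 High profile,
   apparently ported from x264 -- hence the CONFIG_GPL guard. */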
  3248. #define DCT8_1D {\
  3249. const int s07 = SRC(0) + SRC(7);\
  3250. const int s16 = SRC(1) + SRC(6);\
  3251. const int s25 = SRC(2) + SRC(5);\
  3252. const int s34 = SRC(3) + SRC(4);\
  3253. const int a0 = s07 + s34;\
  3254. const int a1 = s16 + s25;\
  3255. const int a2 = s07 - s34;\
  3256. const int a3 = s16 - s25;\
  3257. const int d07 = SRC(0) - SRC(7);\
  3258. const int d16 = SRC(1) - SRC(6);\
  3259. const int d25 = SRC(2) - SRC(5);\
  3260. const int d34 = SRC(3) - SRC(4);\
  3261. const int a4 = d16 + d25 + (d07 + (d07>>1));\
  3262. const int a5 = d07 - d34 - (d25 + (d25>>1));\
  3263. const int a6 = d07 + d34 - (d16 + (d16>>1));\
  3264. const int a7 = d16 - d25 + (d34 + (d34>>1));\
  3265. DST(0, a0 + a1 ) ;\
  3266. DST(1, a4 + (a7>>2)) ;\
  3267. DST(2, a2 + (a3>>1)) ;\
  3268. DST(3, a5 + (a6>>2)) ;\
  3269. DST(4, a0 - a1 ) ;\
  3270. DST(5, a6 - (a5>>2)) ;\
  3271. DST(6, (a2>>1) - a3 ) ;\
  3272. DST(7, (a4>>2) - a7 ) ;\
  3273. }
  3274. static int dct264_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
  3275. MpegEncContext * const s= (MpegEncContext *)c;
  3276. DCTELEM dct[8][8];
  3277. int i;
  3278. int sum=0;
  3279. s->dsp.diff_pixels(dct[0], src1, src2, stride);
  3280. #define SRC(x) dct[i][x]
  3281. #define DST(x,v) dct[i][x]= v
  3282. for( i = 0; i < 8; i++ )
  3283. DCT8_1D
  3284. #undef SRC
  3285. #undef DST
  3286. #define SRC(x) dct[x][i]
  3287. #define DST(x,v) sum += FFABS(v)
  3288. for( i = 0; i < 8; i++ )
  3289. DCT8_1D
  3290. #undef SRC
  3291. #undef DST
  3292. return sum;
  3293. }
  3294. #endif
  3295. static int dct_max8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
  3296. MpegEncContext * const s= (MpegEncContext *)c;
  3297. LOCAL_ALIGNED_16(DCTELEM, temp, [64]);
  3298. int sum=0, i;
  3299. assert(h==8);
  3300. s->dsp.diff_pixels(temp, src1, src2, stride);
  3301. s->dsp.fdct(temp);
  3302. for(i=0; i<64; i++)
  3303. sum= FFMAX(sum, FFABS(temp[i]));
  3304. return sum;
  3305. }
  3306. static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
  3307. MpegEncContext * const s= (MpegEncContext *)c;
  3308. LOCAL_ALIGNED_16(DCTELEM, temp, [64*2]);
  3309. DCTELEM * const bak = temp+64;
  3310. int sum=0, i;
  3311. assert(h==8);
  3312. s->mb_intra=0;
  3313. s->dsp.diff_pixels(temp, src1, src2, stride);
  3314. memcpy(bak, temp, 64*sizeof(DCTELEM));
  3315. s->block_last_index[0/*FIXME*/]= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
  3316. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  3317. ff_simple_idct(temp); //FIXME
  3318. for(i=0; i<64; i++)
  3319. sum+= (temp[i]-bak[i])*(temp[i]-bak[i]);
  3320. return sum;
  3321. }
  3322. static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
  3323. MpegEncContext * const s= (MpegEncContext *)c;
  3324. const uint8_t *scantable= s->intra_scantable.permutated;
  3325. LOCAL_ALIGNED_16(DCTELEM, temp, [64]);
  3326. LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
  3327. LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
  3328. int i, last, run, bits, level, distortion, start_i;
  3329. const int esc_length= s->ac_esc_length;
  3330. uint8_t * length;
  3331. uint8_t * last_length;
  3332. assert(h==8);
  3333. copy_block8(lsrc1, src1, 8, stride, 8);
  3334. copy_block8(lsrc2, src2, 8, stride, 8);
  3335. s->dsp.diff_pixels(temp, lsrc1, lsrc2, 8);
  3336. s->block_last_index[0/*FIXME*/]= last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
  3337. bits=0;
  3338. if (s->mb_intra) {
  3339. start_i = 1;
  3340. length = s->intra_ac_vlc_length;
  3341. last_length= s->intra_ac_vlc_last_length;
  3342. bits+= s->luma_dc_vlc_length[temp[0] + 256]; //FIXME chroma
  3343. } else {
  3344. start_i = 0;
  3345. length = s->inter_ac_vlc_length;
  3346. last_length= s->inter_ac_vlc_last_length;
  3347. }
  3348. if(last>=start_i){
  3349. run=0;
  3350. for(i=start_i; i<last; i++){
  3351. int j= scantable[i];
  3352. level= temp[j];
  3353. if(level){
  3354. level+=64;
  3355. if((level&(~127)) == 0){
  3356. bits+= length[UNI_AC_ENC_INDEX(run, level)];
  3357. }else
  3358. bits+= esc_length;
  3359. run=0;
  3360. }else
  3361. run++;
  3362. }
  3363. i= scantable[last];
  3364. level= temp[i] + 64;
  3365. assert(level - 64);
  3366. if((level&(~127)) == 0){
  3367. bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
  3368. }else
  3369. bits+= esc_length;
  3370. }
  3371. if(last>=0){
  3372. if(s->mb_intra)
  3373. s->dct_unquantize_intra(s, temp, 0, s->qscale);
  3374. else
  3375. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  3376. }
  3377. s->dsp.idct_add(lsrc2, 8, temp);
  3378. distortion= s->dsp.sse[1](NULL, lsrc2, lsrc1, 8, 8);
  3379. return distortion + ((bits*s->qscale*s->qscale*109 + 64)>>7);
  3380. }
  3381. static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
  3382. MpegEncContext * const s= (MpegEncContext *)c;
  3383. const uint8_t *scantable= s->intra_scantable.permutated;
  3384. LOCAL_ALIGNED_16(DCTELEM, temp, [64]);
  3385. int i, last, run, bits, level, start_i;
  3386. const int esc_length= s->ac_esc_length;
  3387. uint8_t * length;
  3388. uint8_t * last_length;
  3389. assert(h==8);
  3390. s->dsp.diff_pixels(temp, src1, src2, stride);
  3391. s->block_last_index[0/*FIXME*/]= last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
  3392. bits=0;
  3393. if (s->mb_intra) {
  3394. start_i = 1;
  3395. length = s->intra_ac_vlc_length;
  3396. last_length= s->intra_ac_vlc_last_length;
  3397. bits+= s->luma_dc_vlc_length[temp[0] + 256]; //FIXME chroma
  3398. } else {
  3399. start_i = 0;
  3400. length = s->inter_ac_vlc_length;
  3401. last_length= s->inter_ac_vlc_last_length;
  3402. }
  3403. if(last>=start_i){
  3404. run=0;
  3405. for(i=start_i; i<last; i++){
  3406. int j= scantable[i];
  3407. level= temp[j];
  3408. if(level){
  3409. level+=64;
  3410. if((level&(~127)) == 0){
  3411. bits+= length[UNI_AC_ENC_INDEX(run, level)];
  3412. }else
  3413. bits+= esc_length;
  3414. run=0;
  3415. }else
  3416. run++;
  3417. }
  3418. i= scantable[last];
  3419. level= temp[i] + 64;
  3420. assert(level - 64);
  3421. if((level&(~127)) == 0){
  3422. bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
  3423. }else
  3424. bits+= esc_length;
  3425. }
  3426. return bits;
  3427. }
  3428. #define VSAD_INTRA(size) \
  3429. static int vsad_intra##size##_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ \
  3430. int score=0; \
  3431. int x,y; \
  3432. \
  3433. for(y=1; y<h; y++){ \
  3434. for(x=0; x<size; x+=4){ \
  3435. score+= FFABS(s[x ] - s[x +stride]) + FFABS(s[x+1] - s[x+1+stride]) \
  3436. +FFABS(s[x+2] - s[x+2+stride]) + FFABS(s[x+3] - s[x+3+stride]); \
  3437. } \
  3438. s+= stride; \
  3439. } \
  3440. \
  3441. return score; \
  3442. }
  3443. VSAD_INTRA(8)
  3444. VSAD_INTRA(16)
  3445. static int vsad16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){
  3446. int score=0;
  3447. int x,y;
  3448. for(y=1; y<h; y++){
  3449. for(x=0; x<16; x++){
  3450. score+= FFABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
  3451. }
  3452. s1+= stride;
  3453. s2+= stride;
  3454. }
  3455. return score;
  3456. }
  3457. #define SQ(a) ((a)*(a))
  3458. #define VSSE_INTRA(size) \
  3459. static int vsse_intra##size##_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ \
  3460. int score=0; \
  3461. int x,y; \
  3462. \
  3463. for(y=1; y<h; y++){ \
  3464. for(x=0; x<size; x+=4){ \
  3465. score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride]) \
  3466. +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]); \
  3467. } \
  3468. s+= stride; \
  3469. } \
  3470. \
  3471. return score; \
  3472. }
  3473. VSSE_INTRA(8)
  3474. VSSE_INTRA(16)
  3475. static int vsse16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){
  3476. int score=0;
  3477. int x,y;
  3478. for(y=1; y<h; y++){
  3479. for(x=0; x<16; x++){
  3480. score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
  3481. }
  3482. s1+= stride;
  3483. s2+= stride;
  3484. }
  3485. return score;
  3486. }
  3487. static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
  3488. int size){
  3489. int score=0;
  3490. int i;
  3491. for(i=0; i<size; i++)
  3492. score += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]);
  3493. return score;
  3494. }
  3495. WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
  3496. WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
  3497. WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
  3498. #if CONFIG_GPL
  3499. WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
  3500. #endif
  3501. WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
  3502. WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
  3503. WRAPPER8_16_SQ(rd8x8_c, rd16_c)
  3504. WRAPPER8_16_SQ(bit8x8_c, bit16_c)
  3505. static void vector_fmul_c(float *dst, const float *src, int len){
  3506. int i;
  3507. for(i=0; i<len; i++)
  3508. dst[i] *= src[i];
  3509. }
  3510. static void vector_fmul_reverse_c(float *dst, const float *src0, const float *src1, int len){
  3511. int i;
  3512. src1 += len-1;
  3513. for(i=0; i<len; i++)
  3514. dst[i] = src0[i] * src1[-i];
  3515. }
  3516. static void vector_fmul_add_c(float *dst, const float *src0, const float *src1, const float *src2, int len){
  3517. int i;
  3518. for(i=0; i<len; i++)
  3519. dst[i] = src0[i] * src1[i] + src2[i];
  3520. }
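/* Windowed overlap-add as used by MDCT-based audio codecs: walk the
   buffers from both ends at once (i from the start, j from the end of
   the window), computing dst[i] = s0*win[j] - s1*win[i] and
   dst[j] = s0*win[i] + s1*win[j], plus an optional DC bias. */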
  3521. void ff_vector_fmul_window_c(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len){
  3522. int i,j;
  3523. dst += len;
  3524. win += len;
  3525. src0+= len;
  3526. for(i=-len, j=len-1; i<0; i++, j--) {
  3527. float s0 = src0[i];
  3528. float s1 = src1[j];
  3529. float wi = win[i];
  3530. float wj = win[j];
  3531. dst[i] = s0*wj - s1*wi + add_bias;
  3532. dst[j] = s0*wi + s1*wj + add_bias;
  3533. }
  3534. }
  3535. static void vector_fmul_scalar_c(float *dst, const float *src, float mul,
  3536. int len)
  3537. {
  3538. int i;
  3539. for (i = 0; i < len; i++)
  3540. dst[i] = src[i] * mul;
  3541. }
  3542. static void vector_fmul_sv_scalar_2_c(float *dst, const float *src,
  3543. const float **sv, float mul, int len)
  3544. {
  3545. int i;
  3546. for (i = 0; i < len; i += 2, sv++) {
  3547. dst[i ] = src[i ] * sv[0][0] * mul;
  3548. dst[i+1] = src[i+1] * sv[0][1] * mul;
  3549. }
  3550. }
  3551. static void vector_fmul_sv_scalar_4_c(float *dst, const float *src,
  3552. const float **sv, float mul, int len)
  3553. {
  3554. int i;
  3555. for (i = 0; i < len; i += 4, sv++) {
  3556. dst[i ] = src[i ] * sv[0][0] * mul;
  3557. dst[i+1] = src[i+1] * sv[0][1] * mul;
  3558. dst[i+2] = src[i+2] * sv[0][2] * mul;
  3559. dst[i+3] = src[i+3] * sv[0][3] * mul;
  3560. }
  3561. }
  3562. static void sv_fmul_scalar_2_c(float *dst, const float **sv, float mul,
  3563. int len)
  3564. {
  3565. int i;
  3566. for (i = 0; i < len; i += 2, sv++) {
  3567. dst[i ] = sv[0][0] * mul;
  3568. dst[i+1] = sv[0][1] * mul;
  3569. }
  3570. }
  3571. static void sv_fmul_scalar_4_c(float *dst, const float **sv, float mul,
  3572. int len)
  3573. {
  3574. int i;
  3575. for (i = 0; i < len; i += 4, sv++) {
  3576. dst[i ] = sv[0][0] * mul;
  3577. dst[i+1] = sv[0][1] * mul;
  3578. dst[i+2] = sv[0][2] * mul;
  3579. dst[i+3] = sv[0][3] * mul;
  3580. }
  3581. }
  3582. static void butterflies_float_c(float *restrict v1, float *restrict v2,
  3583. int len)
  3584. {
  3585. int i;
  3586. for (i = 0; i < len; i++) {
  3587. float t = v1[i] - v2[i];
  3588. v1[i] += v2[i];
  3589. v2[i] = t;
  3590. }
  3591. }
  3592. static float scalarproduct_float_c(const float *v1, const float *v2, int len)
  3593. {
  3594. float p = 0.0;
  3595. int i;
  3596. for (i = 0; i < len; i++)
  3597. p += v1[i] * v2[i];
  3598. return p;
  3599. }
  3600. static void int32_to_float_fmul_scalar_c(float *dst, const int *src, float mul, int len){
  3601. int i;
  3602. for(i=0; i<len; i++)
  3603. dst[i] = src[i] * mul;
  3604. }
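/* Clip floats by comparing their IEEE-754 bit patterns as integers:
   negative floats compare in reverse order when viewed as unsigned, so
   a > mini catches values below the (negative) minimum, while flipping
   the sign bit (1U<<31 -- unsigned, to avoid shifting into the sign bit
   of an int) makes positive values comparable against the maximum.
   Only valid when min < 0 < max, which vector_clipf_c checks. */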
  3605. static inline uint32_t clipf_c_one(uint32_t a, uint32_t mini,
  3606. uint32_t maxi, uint32_t maxisign)
  3607. {
  3608. if(a > mini) return mini;
2609. else if((a^(1U<<31)) > maxisign) return maxi;
  3610. else return a;
  3611. }
  3612. static void vector_clipf_c_opposite_sign(float *dst, const float *src, float *min, float *max, int len){
  3613. int i;
  3614. uint32_t mini = *(uint32_t*)min;
  3615. uint32_t maxi = *(uint32_t*)max;
2616. uint32_t maxisign = maxi ^ (1U<<31);
  3617. uint32_t *dsti = (uint32_t*)dst;
  3618. const uint32_t *srci = (const uint32_t*)src;
  3619. for(i=0; i<len; i+=8) {
  3620. dsti[i + 0] = clipf_c_one(srci[i + 0], mini, maxi, maxisign);
  3621. dsti[i + 1] = clipf_c_one(srci[i + 1], mini, maxi, maxisign);
  3622. dsti[i + 2] = clipf_c_one(srci[i + 2], mini, maxi, maxisign);
  3623. dsti[i + 3] = clipf_c_one(srci[i + 3], mini, maxi, maxisign);
  3624. dsti[i + 4] = clipf_c_one(srci[i + 4], mini, maxi, maxisign);
  3625. dsti[i + 5] = clipf_c_one(srci[i + 5], mini, maxi, maxisign);
  3626. dsti[i + 6] = clipf_c_one(srci[i + 6], mini, maxi, maxisign);
  3627. dsti[i + 7] = clipf_c_one(srci[i + 7], mini, maxi, maxisign);
  3628. }
  3629. }
  3630. static void vector_clipf_c(float *dst, const float *src, float min, float max, int len){
  3631. int i;
  3632. if(min < 0 && max > 0) {
  3633. vector_clipf_c_opposite_sign(dst, src, &min, &max, len);
  3634. } else {
  3635. for(i=0; i < len; i+=8) {
  3636. dst[i ] = av_clipf(src[i ], min, max);
  3637. dst[i + 1] = av_clipf(src[i + 1], min, max);
  3638. dst[i + 2] = av_clipf(src[i + 2], min, max);
  3639. dst[i + 3] = av_clipf(src[i + 3], min, max);
  3640. dst[i + 4] = av_clipf(src[i + 4], min, max);
  3641. dst[i + 5] = av_clipf(src[i + 5], min, max);
  3642. dst[i + 6] = av_clipf(src[i + 6], min, max);
  3643. dst[i + 7] = av_clipf(src[i + 7], min, max);
  3644. }
  3645. }
  3646. }
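/* Fast float->int16 conversion: callers are expected to have scaled and
   biased the samples so that in-range values carry bit patterns
   0x43c00000..0x43c0ffff (floats in [384.0, 386.0)); anything with bits
   16..19 set is out of range and is branchlessly saturated to 0x0000 or
   0xffff before being re-centered around zero by the -0x8000. */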
  3647. static av_always_inline int float_to_int16_one(const float *src){
  3648. int_fast32_t tmp = *(const int32_t*)src;
  3649. if(tmp & 0xf0000){
  3650. tmp = (0x43c0ffff - tmp)>>31;
  3651. // is this faster on some gcc/cpu combinations?
  3652. // if(tmp > 0x43c0ffff) tmp = 0xFFFF;
  3653. // else tmp = 0;
  3654. }
  3655. return tmp - 0x8000;
  3656. }
  3657. void ff_float_to_int16_c(int16_t *dst, const float *src, long len){
  3658. int i;
  3659. for(i=0; i<len; i++)
  3660. dst[i] = float_to_int16_one(src+i);
  3661. }
  3662. void ff_float_to_int16_interleave_c(int16_t *dst, const float **src, long len, int channels){
  3663. int i,j,c;
  3664. if(channels==2){
  3665. for(i=0; i<len; i++){
  3666. dst[2*i] = float_to_int16_one(src[0]+i);
  3667. dst[2*i+1] = float_to_int16_one(src[1]+i);
  3668. }
  3669. }else{
  3670. for(c=0; c<channels; c++)
  3671. for(i=0, j=c; i<len; i++, j+=channels)
  3672. dst[j] = float_to_int16_one(src[c]+i);
  3673. }
  3674. }
  3675. static int32_t scalarproduct_int16_c(int16_t * v1, int16_t * v2, int order, int shift)
  3676. {
  3677. int res = 0;
  3678. while (order--)
  3679. res += (*v1++ * *v2++) >> shift;
  3680. return res;
  3681. }
  3682. static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
  3683. {
  3684. int res = 0;
  3685. while (order--) {
  3686. res += *v1 * *v2++;
  3687. *v1++ += mul * *v3++;
  3688. }
  3689. return res;
  3690. }
  3691. #define W0 2048
  3692. #define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */
  3693. #define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */
  3694. #define W3 2408 /* 2048*sqrt (2)*cos (3*pi/16) */
  3695. #define W4 2048 /* 2048*sqrt (2)*cos (4*pi/16) */
  3696. #define W5 1609 /* 2048*sqrt (2)*cos (5*pi/16) */
  3697. #define W6 1108 /* 2048*sqrt (2)*cos (6*pi/16) */
  3698. #define W7 565 /* 2048*sqrt (2)*cos (7*pi/16) */
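/* WMV2 IDCT: separable 8-point fixed-point IDCT built on the usual
   2048*sqrt(2)*cos(k*pi/16) coefficient table; the row pass shifts down
   by only 8 bits, leaving extra precision that the column pass folds
   into its own rounding shifts. */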
  3699. static void wmv2_idct_row(short * b)
  3700. {
  3701. int s1,s2;
  3702. int a0,a1,a2,a3,a4,a5,a6,a7;
  3703. /*step 1*/
  3704. a1 = W1*b[1]+W7*b[7];
  3705. a7 = W7*b[1]-W1*b[7];
  3706. a5 = W5*b[5]+W3*b[3];
  3707. a3 = W3*b[5]-W5*b[3];
  3708. a2 = W2*b[2]+W6*b[6];
  3709. a6 = W6*b[2]-W2*b[6];
  3710. a0 = W0*b[0]+W0*b[4];
  3711. a4 = W0*b[0]-W0*b[4];
  3712. /*step 2*/
  3713. s1 = (181*(a1-a5+a7-a3)+128)>>8;//1,3,5,7,
  3714. s2 = (181*(a1-a5-a7+a3)+128)>>8;
  3715. /*step 3*/
  3716. b[0] = (a0+a2+a1+a5 + (1<<7))>>8;
  3717. b[1] = (a4+a6 +s1 + (1<<7))>>8;
  3718. b[2] = (a4-a6 +s2 + (1<<7))>>8;
  3719. b[3] = (a0-a2+a7+a3 + (1<<7))>>8;
  3720. b[4] = (a0-a2-a7-a3 + (1<<7))>>8;
  3721. b[5] = (a4-a6 -s2 + (1<<7))>>8;
  3722. b[6] = (a4+a6 -s1 + (1<<7))>>8;
  3723. b[7] = (a0+a2-a1-a5 + (1<<7))>>8;
  3724. }
  3725. static void wmv2_idct_col(short * b)
  3726. {
  3727. int s1,s2;
  3728. int a0,a1,a2,a3,a4,a5,a6,a7;
  3729. /*step 1, with extended precision*/
  3730. a1 = (W1*b[8*1]+W7*b[8*7] + 4)>>3;
  3731. a7 = (W7*b[8*1]-W1*b[8*7] + 4)>>3;
  3732. a5 = (W5*b[8*5]+W3*b[8*3] + 4)>>3;
  3733. a3 = (W3*b[8*5]-W5*b[8*3] + 4)>>3;
  3734. a2 = (W2*b[8*2]+W6*b[8*6] + 4)>>3;
  3735. a6 = (W6*b[8*2]-W2*b[8*6] + 4)>>3;
  3736. a0 = (W0*b[8*0]+W0*b[8*4] )>>3;
  3737. a4 = (W0*b[8*0]-W0*b[8*4] )>>3;
  3738. /*step 2*/
  3739. s1 = (181*(a1-a5+a7-a3)+128)>>8;
  3740. s2 = (181*(a1-a5-a7+a3)+128)>>8;
  3741. /*step 3*/
  3742. b[8*0] = (a0+a2+a1+a5 + (1<<13))>>14;
  3743. b[8*1] = (a4+a6 +s1 + (1<<13))>>14;
  3744. b[8*2] = (a4-a6 +s2 + (1<<13))>>14;
  3745. b[8*3] = (a0-a2+a7+a3 + (1<<13))>>14;
  3746. b[8*4] = (a0-a2-a7-a3 + (1<<13))>>14;
  3747. b[8*5] = (a4-a6 -s2 + (1<<13))>>14;
  3748. b[8*6] = (a4+a6 -s1 + (1<<13))>>14;
  3749. b[8*7] = (a0+a2-a1-a5 + (1<<13))>>14;
  3750. }
  3751. void ff_wmv2_idct_c(short * block){
  3752. int i;
  3753. for(i=0;i<64;i+=8){
  3754. wmv2_idct_row(block+i);
  3755. }
  3756. for(i=0;i<8;i++){
  3757. wmv2_idct_col(block+i);
  3758. }
  3759. }
3760. /* XXX: these functions should be removed as soon as all IDCTs are
3761. converted */
  3762. static void ff_wmv2_idct_put_c(uint8_t *dest, int line_size, DCTELEM *block)
  3763. {
  3764. ff_wmv2_idct_c(block);
  3765. put_pixels_clamped_c(block, dest, line_size);
  3766. }
  3767. static void ff_wmv2_idct_add_c(uint8_t *dest, int line_size, DCTELEM *block)
  3768. {
  3769. ff_wmv2_idct_c(block);
  3770. add_pixels_clamped_c(block, dest, line_size);
  3771. }
  3772. static void ff_jref_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  3773. {
  3774. j_rev_dct (block);
  3775. put_pixels_clamped_c(block, dest, line_size);
  3776. }
  3777. static void ff_jref_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  3778. {
  3779. j_rev_dct (block);
  3780. add_pixels_clamped_c(block, dest, line_size);
  3781. }
  3782. static void ff_jref_idct4_put(uint8_t *dest, int line_size, DCTELEM *block)
  3783. {
  3784. j_rev_dct4 (block);
  3785. put_pixels_clamped4_c(block, dest, line_size);
  3786. }
  3787. static void ff_jref_idct4_add(uint8_t *dest, int line_size, DCTELEM *block)
  3788. {
  3789. j_rev_dct4 (block);
  3790. add_pixels_clamped4_c(block, dest, line_size);
  3791. }
  3792. static void ff_jref_idct2_put(uint8_t *dest, int line_size, DCTELEM *block)
  3793. {
  3794. j_rev_dct2 (block);
  3795. put_pixels_clamped2_c(block, dest, line_size);
  3796. }
  3797. static void ff_jref_idct2_add(uint8_t *dest, int line_size, DCTELEM *block)
  3798. {
  3799. j_rev_dct2 (block);
  3800. add_pixels_clamped2_c(block, dest, line_size);
  3801. }
  3802. static void ff_jref_idct1_put(uint8_t *dest, int line_size, DCTELEM *block)
  3803. {
  3804. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  3805. dest[0] = cm[(block[0] + 4)>>3];
  3806. }
  3807. static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block)
  3808. {
  3809. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  3810. dest[0] = cm[dest[0] + ((block[0] + 4)>>3)];
  3811. }
  3812. static void just_return(void *mem av_unused, int stride av_unused, int h av_unused) { return; }
  3813. /* init static data */
  3814. av_cold void dsputil_static_init(void)
  3815. {
  3816. int i;
  3817. for(i=0;i<256;i++) ff_cropTbl[i + MAX_NEG_CROP] = i;
  3818. for(i=0;i<MAX_NEG_CROP;i++) {
  3819. ff_cropTbl[i] = 0;
  3820. ff_cropTbl[i + MAX_NEG_CROP + 256] = 255;
  3821. }
  3822. for(i=0;i<512;i++) {
  3823. ff_squareTbl[i] = (i - 256) * (i - 256);
  3824. }
  3825. for(i=0; i<64; i++) inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
  3826. }
  3827. int ff_check_alignment(void){
  3828. static int did_fail=0;
  3829. DECLARE_ALIGNED(16, int, aligned);
  3830. if((intptr_t)&aligned & 15){
  3831. if(!did_fail){
  3832. #if HAVE_MMX || HAVE_ALTIVEC
  3833. av_log(NULL, AV_LOG_ERROR,
  3834. "Compiler did not align stack variables. Libavcodec has been miscompiled\n"
  3835. "and may be very slow or crash. This is not a bug in libavcodec,\n"
  3836. "but in the compiler. You may try recompiling using gcc >= 4.2.\n"
  3837. "Do not report crashes to FFmpeg developers.\n");
  3838. #endif
  3839. did_fail=1;
  3840. }
  3841. return -1;
  3842. }
  3843. return 0;
  3844. }
  3845. av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
  3846. {
  3847. int i;
  3848. ff_check_alignment();
  3849. #if CONFIG_ENCODERS
  3850. if(avctx->dct_algo==FF_DCT_FASTINT) {
  3851. c->fdct = fdct_ifast;
  3852. c->fdct248 = fdct_ifast248;
  3853. }
  3854. else if(avctx->dct_algo==FF_DCT_FAAN) {
  3855. c->fdct = ff_faandct;
  3856. c->fdct248 = ff_faandct248;
  3857. }
  3858. else {
  3859. c->fdct = ff_jpeg_fdct_islow; //slow/accurate/default
  3860. c->fdct248 = ff_fdct248_islow;
  3861. }
  3862. #endif //CONFIG_ENCODERS
  3863. if(avctx->lowres==1){
  3864. if(avctx->idct_algo==FF_IDCT_INT || avctx->idct_algo==FF_IDCT_AUTO || !CONFIG_H264_DECODER){
  3865. c->idct_put= ff_jref_idct4_put;
  3866. c->idct_add= ff_jref_idct4_add;
  3867. }else{
  3868. c->idct_put= ff_h264_lowres_idct_put_c;
  3869. c->idct_add= ff_h264_lowres_idct_add_c;
  3870. }
  3871. c->idct = j_rev_dct4;
  3872. c->idct_permutation_type= FF_NO_IDCT_PERM;
  3873. }else if(avctx->lowres==2){
  3874. c->idct_put= ff_jref_idct2_put;
  3875. c->idct_add= ff_jref_idct2_add;
  3876. c->idct = j_rev_dct2;
  3877. c->idct_permutation_type= FF_NO_IDCT_PERM;
  3878. }else if(avctx->lowres==3){
  3879. c->idct_put= ff_jref_idct1_put;
  3880. c->idct_add= ff_jref_idct1_add;
  3881. c->idct = j_rev_dct1;
  3882. c->idct_permutation_type= FF_NO_IDCT_PERM;
  3883. }else{
  3884. if(avctx->idct_algo==FF_IDCT_INT){
  3885. c->idct_put= ff_jref_idct_put;
  3886. c->idct_add= ff_jref_idct_add;
  3887. c->idct = j_rev_dct;
  3888. c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
  3889. }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER ) &&
  3890. avctx->idct_algo==FF_IDCT_VP3){
  3891. c->idct_put= ff_vp3_idct_put_c;
  3892. c->idct_add= ff_vp3_idct_add_c;
  3893. c->idct = ff_vp3_idct_c;
  3894. c->idct_permutation_type= FF_NO_IDCT_PERM;
  3895. }else if(avctx->idct_algo==FF_IDCT_WMV2){
  3896. c->idct_put= ff_wmv2_idct_put_c;
  3897. c->idct_add= ff_wmv2_idct_add_c;
  3898. c->idct = ff_wmv2_idct_c;
  3899. c->idct_permutation_type= FF_NO_IDCT_PERM;
  3900. }else if(avctx->idct_algo==FF_IDCT_FAAN){
  3901. c->idct_put= ff_faanidct_put;
  3902. c->idct_add= ff_faanidct_add;
  3903. c->idct = ff_faanidct;
  3904. c->idct_permutation_type= FF_NO_IDCT_PERM;
  3905. }else if(CONFIG_EATGQ_DECODER && avctx->idct_algo==FF_IDCT_EA) {
  3906. c->idct_put= ff_ea_idct_put_c;
  3907. c->idct_permutation_type= FF_NO_IDCT_PERM;
  3908. }else if(CONFIG_BINK_DECODER && avctx->idct_algo==FF_IDCT_BINK) {
  3909. c->idct = ff_bink_idct_c;
  3910. c->idct_add = ff_bink_idct_add_c;
  3911. c->idct_put = ff_bink_idct_put_c;
  3912. c->idct_permutation_type = FF_NO_IDCT_PERM;
  3913. }else{ //accurate/default
  3914. c->idct_put= ff_simple_idct_put;
  3915. c->idct_add= ff_simple_idct_add;
  3916. c->idct = ff_simple_idct;
  3917. c->idct_permutation_type= FF_NO_IDCT_PERM;
  3918. }
  3919. }
  3920. c->get_pixels = get_pixels_c;
  3921. c->diff_pixels = diff_pixels_c;
  3922. c->put_pixels_clamped = put_pixels_clamped_c;
  3923. c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
  3924. c->put_pixels_nonclamped = put_pixels_nonclamped_c;
  3925. c->add_pixels_clamped = add_pixels_clamped_c;
  3926. c->add_pixels8 = add_pixels8_c;
  3927. c->add_pixels4 = add_pixels4_c;
  3928. c->sum_abs_dctelem = sum_abs_dctelem_c;
  3929. c->gmc1 = gmc1_c;
  3930. c->gmc = ff_gmc_c;
  3931. c->clear_block = clear_block_c;
  3932. c->clear_blocks = clear_blocks_c;
  3933. c->pix_sum = pix_sum_c;
  3934. c->pix_norm1 = pix_norm1_c;
  3935. c->fill_block_tab[0] = fill_block16_c;
  3936. c->fill_block_tab[1] = fill_block8_c;
  3937. c->scale_block = scale_block_c;
3938. /* index [0] = 16x16 blocks, [1] = 8x8 blocks */
  3939. c->pix_abs[0][0] = pix_abs16_c;
  3940. c->pix_abs[0][1] = pix_abs16_x2_c;
  3941. c->pix_abs[0][2] = pix_abs16_y2_c;
  3942. c->pix_abs[0][3] = pix_abs16_xy2_c;
  3943. c->pix_abs[1][0] = pix_abs8_c;
  3944. c->pix_abs[1][1] = pix_abs8_x2_c;
  3945. c->pix_abs[1][2] = pix_abs8_y2_c;
  3946. c->pix_abs[1][3] = pix_abs8_xy2_c;
  3947. #define dspfunc(PFX, IDX, NUM) \
  3948. c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## NUM ## _c; \
  3949. c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## NUM ## _x2_c; \
  3950. c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## NUM ## _y2_c; \
  3951. c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## NUM ## _xy2_c
  3952. dspfunc(put, 0, 16);
  3953. dspfunc(put_no_rnd, 0, 16);
  3954. dspfunc(put, 1, 8);
  3955. dspfunc(put_no_rnd, 1, 8);
  3956. dspfunc(put, 2, 4);
  3957. dspfunc(put, 3, 2);
  3958. dspfunc(avg, 0, 16);
  3959. dspfunc(avg_no_rnd, 0, 16);
  3960. dspfunc(avg, 1, 8);
  3961. dspfunc(avg_no_rnd, 1, 8);
  3962. dspfunc(avg, 2, 4);
  3963. dspfunc(avg, 3, 2);
  3964. #undef dspfunc
  3965. c->put_no_rnd_pixels_l2[0]= put_no_rnd_pixels16_l2_c;
  3966. c->put_no_rnd_pixels_l2[1]= put_no_rnd_pixels8_l2_c;
  3967. c->put_tpel_pixels_tab[ 0] = put_tpel_pixels_mc00_c;
  3968. c->put_tpel_pixels_tab[ 1] = put_tpel_pixels_mc10_c;
  3969. c->put_tpel_pixels_tab[ 2] = put_tpel_pixels_mc20_c;
  3970. c->put_tpel_pixels_tab[ 4] = put_tpel_pixels_mc01_c;
  3971. c->put_tpel_pixels_tab[ 5] = put_tpel_pixels_mc11_c;
  3972. c->put_tpel_pixels_tab[ 6] = put_tpel_pixels_mc21_c;
  3973. c->put_tpel_pixels_tab[ 8] = put_tpel_pixels_mc02_c;
  3974. c->put_tpel_pixels_tab[ 9] = put_tpel_pixels_mc12_c;
  3975. c->put_tpel_pixels_tab[10] = put_tpel_pixels_mc22_c;
  3976. c->avg_tpel_pixels_tab[ 0] = avg_tpel_pixels_mc00_c;
  3977. c->avg_tpel_pixels_tab[ 1] = avg_tpel_pixels_mc10_c;
  3978. c->avg_tpel_pixels_tab[ 2] = avg_tpel_pixels_mc20_c;
  3979. c->avg_tpel_pixels_tab[ 4] = avg_tpel_pixels_mc01_c;
  3980. c->avg_tpel_pixels_tab[ 5] = avg_tpel_pixels_mc11_c;
  3981. c->avg_tpel_pixels_tab[ 6] = avg_tpel_pixels_mc21_c;
  3982. c->avg_tpel_pixels_tab[ 8] = avg_tpel_pixels_mc02_c;
  3983. c->avg_tpel_pixels_tab[ 9] = avg_tpel_pixels_mc12_c;
  3984. c->avg_tpel_pixels_tab[10] = avg_tpel_pixels_mc22_c;
  3985. #define dspfunc(PFX, IDX, NUM) \
  3986. c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
  3987. c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
  3988. c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
  3989. c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
  3990. c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
  3991. c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
  3992. c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
  3993. c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
  3994. c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
  3995. c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
  3996. c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
  3997. c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
  3998. c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
  3999. c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
  4000. c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
  4001. c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
  4002. dspfunc(put_qpel, 0, 16);
  4003. dspfunc(put_no_rnd_qpel, 0, 16);
  4004. dspfunc(avg_qpel, 0, 16);
  4005. /* dspfunc(avg_no_rnd_qpel, 0, 16); */
  4006. dspfunc(put_qpel, 1, 8);
  4007. dspfunc(put_no_rnd_qpel, 1, 8);
  4008. dspfunc(avg_qpel, 1, 8);
  4009. /* dspfunc(avg_no_rnd_qpel, 1, 8); */
  4010. dspfunc(put_h264_qpel, 0, 16);
  4011. dspfunc(put_h264_qpel, 1, 8);
  4012. dspfunc(put_h264_qpel, 2, 4);
  4013. dspfunc(put_h264_qpel, 3, 2);
  4014. dspfunc(avg_h264_qpel, 0, 16);
  4015. dspfunc(avg_h264_qpel, 1, 8);
  4016. dspfunc(avg_h264_qpel, 2, 4);
  4017. #undef dspfunc
  4018. c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_c;
  4019. c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_c;
  4020. c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_c;
  4021. c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_c;
  4022. c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c;
  4023. c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c;
  4024. c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c;
  4025. c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c;
  4026. c->draw_edges = draw_edges_c;
  4027. #if CONFIG_CAVS_DECODER
  4028. ff_cavsdsp_init(c,avctx);
  4029. #endif
  4030. #if CONFIG_MLP_DECODER || CONFIG_TRUEHD_DECODER
  4031. ff_mlp_init(c, avctx);
  4032. #endif
  4033. #if CONFIG_VC1_DECODER
  4034. ff_vc1dsp_init(c,avctx);
  4035. #endif
  4036. #if CONFIG_WMV2_DECODER || CONFIG_VC1_DECODER
  4037. ff_intrax8dsp_init(c,avctx);
  4038. #endif
  4039. #if CONFIG_RV30_DECODER
  4040. ff_rv30dsp_init(c,avctx);
  4041. #endif
  4042. #if CONFIG_RV40_DECODER
  4043. ff_rv40dsp_init(c,avctx);
  4044. c->put_rv40_qpel_pixels_tab[0][15] = put_rv40_qpel16_mc33_c;
  4045. c->avg_rv40_qpel_pixels_tab[0][15] = avg_rv40_qpel16_mc33_c;
  4046. c->put_rv40_qpel_pixels_tab[1][15] = put_rv40_qpel8_mc33_c;
  4047. c->avg_rv40_qpel_pixels_tab[1][15] = avg_rv40_qpel8_mc33_c;
  4048. #endif
  4049. c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c;
  4050. c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
  4051. c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c;
  4052. c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c;
  4053. c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c;
  4054. c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c;
  4055. c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
  4056. c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;
  4057. #define SET_CMP_FUNC(name) \
  4058. c->name[0]= name ## 16_c;\
  4059. c->name[1]= name ## 8x8_c;
  4060. SET_CMP_FUNC(hadamard8_diff)
  4061. c->hadamard8_diff[4]= hadamard8_intra16_c;
  4062. c->hadamard8_diff[5]= hadamard8_intra8x8_c;
  4063. SET_CMP_FUNC(dct_sad)
  4064. SET_CMP_FUNC(dct_max)
  4065. #if CONFIG_GPL
  4066. SET_CMP_FUNC(dct264_sad)
  4067. #endif
  4068. c->sad[0]= pix_abs16_c;
  4069. c->sad[1]= pix_abs8_c;
  4070. c->sse[0]= sse16_c;
  4071. c->sse[1]= sse8_c;
  4072. c->sse[2]= sse4_c;
  4073. SET_CMP_FUNC(quant_psnr)
  4074. SET_CMP_FUNC(rd)
  4075. SET_CMP_FUNC(bit)
  4076. c->vsad[0]= vsad16_c;
  4077. c->vsad[4]= vsad_intra16_c;
  4078. c->vsad[5]= vsad_intra8_c;
  4079. c->vsse[0]= vsse16_c;
  4080. c->vsse[4]= vsse_intra16_c;
  4081. c->vsse[5]= vsse_intra8_c;
  4082. c->nsse[0]= nsse16_c;
  4083. c->nsse[1]= nsse8_c;
  4084. #if CONFIG_DWT
  4085. ff_dsputil_init_dwt(c);
  4086. #endif
  4087. c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
  4088. c->add_bytes= add_bytes_c;
  4089. c->add_bytes_l2= add_bytes_l2_c;
  4090. c->diff_bytes= diff_bytes_c;
  4091. c->add_hfyu_median_prediction= add_hfyu_median_prediction_c;
  4092. c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c;
  4093. c->add_hfyu_left_prediction = add_hfyu_left_prediction_c;
  4094. c->add_hfyu_left_prediction_bgr32 = add_hfyu_left_prediction_bgr32_c;
  4095. c->bswap_buf= bswap_buf;
  4096. #if CONFIG_PNG_DECODER
  4097. c->add_png_paeth_prediction= ff_add_png_paeth_prediction;
  4098. #endif
  4099. if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  4100. c->h263_h_loop_filter= h263_h_loop_filter_c;
  4101. c->h263_v_loop_filter= h263_v_loop_filter_c;
  4102. }
  4103. if (CONFIG_VP3_DECODER) {
  4104. c->vp3_h_loop_filter= ff_vp3_h_loop_filter_c;
  4105. c->vp3_v_loop_filter= ff_vp3_v_loop_filter_c;
  4106. }
  4107. if (CONFIG_VP6_DECODER) {
  4108. c->vp6_filter_diag4= ff_vp6_filter_diag4_c;
  4109. }
  4110. c->h261_loop_filter= h261_loop_filter_c;
  4111. c->try_8x8basis= try_8x8basis_c;
  4112. c->add_8x8basis= add_8x8basis_c;
  4113. #if CONFIG_VORBIS_DECODER
  4114. c->vorbis_inverse_coupling = vorbis_inverse_coupling;
  4115. #endif
  4116. #if CONFIG_AC3_DECODER
  4117. c->ac3_downmix = ff_ac3_downmix_c;
  4118. #endif
  4119. #if CONFIG_LPC
  4120. c->lpc_compute_autocorr = ff_lpc_compute_autocorr;
  4121. #endif
  4122. c->vector_fmul = vector_fmul_c;
  4123. c->vector_fmul_reverse = vector_fmul_reverse_c;
  4124. c->vector_fmul_add = vector_fmul_add_c;
  4125. c->vector_fmul_window = ff_vector_fmul_window_c;
  4126. c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_c;
  4127. c->vector_clipf = vector_clipf_c;
  4128. c->float_to_int16 = ff_float_to_int16_c;
  4129. c->float_to_int16_interleave = ff_float_to_int16_interleave_c;
  4130. c->scalarproduct_int16 = scalarproduct_int16_c;
  4131. c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
  4132. c->scalarproduct_float = scalarproduct_float_c;
  4133. c->butterflies_float = butterflies_float_c;
  4134. c->vector_fmul_scalar = vector_fmul_scalar_c;
  4135. c->vector_fmul_sv_scalar[0] = vector_fmul_sv_scalar_2_c;
  4136. c->vector_fmul_sv_scalar[1] = vector_fmul_sv_scalar_4_c;
  4137. c->sv_fmul_scalar[0] = sv_fmul_scalar_2_c;
  4138. c->sv_fmul_scalar[1] = sv_fmul_scalar_4_c;
  4139. c->shrink[0]= ff_img_copy_plane;
  4140. c->shrink[1]= ff_shrink22;
  4141. c->shrink[2]= ff_shrink44;
  4142. c->shrink[3]= ff_shrink88;
  4143. c->prefetch= just_return;
  4144. memset(c->put_2tap_qpel_pixels_tab, 0, sizeof(c->put_2tap_qpel_pixels_tab));
  4145. memset(c->avg_2tap_qpel_pixels_tab, 0, sizeof(c->avg_2tap_qpel_pixels_tab));
  4146. if (HAVE_MMX) dsputil_init_mmx (c, avctx);
  4147. if (ARCH_ARM) dsputil_init_arm (c, avctx);
  4148. if (CONFIG_MLIB) dsputil_init_mlib (c, avctx);
  4149. if (HAVE_VIS) dsputil_init_vis (c, avctx);
  4150. if (ARCH_ALPHA) dsputil_init_alpha (c, avctx);
  4151. if (ARCH_PPC) dsputil_init_ppc (c, avctx);
  4152. if (HAVE_MMI) dsputil_init_mmi (c, avctx);
  4153. if (ARCH_SH4) dsputil_init_sh4 (c, avctx);
  4154. if (ARCH_BFIN) dsputil_init_bfin (c, avctx);
  4155. for(i=0; i<64; i++){
  4156. if(!c->put_2tap_qpel_pixels_tab[0][i])
  4157. c->put_2tap_qpel_pixels_tab[0][i]= c->put_h264_qpel_pixels_tab[0][i];
  4158. if(!c->avg_2tap_qpel_pixels_tab[0][i])
  4159. c->avg_2tap_qpel_pixels_tab[0][i]= c->avg_h264_qpel_pixels_tab[0][i];
  4160. }
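/* Build the coefficient permutation table for the selected IDCT:
   idct_permutation[i] is the position where coefficient i of the
   natural order must be stored so that this IDCT implementation reads
   it correctly; FF_TRANSPOSE_IDCT_PERM, for instance, swaps rows and
   columns. */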
  4161. switch(c->idct_permutation_type){
  4162. case FF_NO_IDCT_PERM:
  4163. for(i=0; i<64; i++)
  4164. c->idct_permutation[i]= i;
  4165. break;
  4166. case FF_LIBMPEG2_IDCT_PERM:
  4167. for(i=0; i<64; i++)
  4168. c->idct_permutation[i]= (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
  4169. break;
  4170. case FF_SIMPLE_IDCT_PERM:
  4171. for(i=0; i<64; i++)
  4172. c->idct_permutation[i]= simple_mmx_permutation[i];
  4173. break;
  4174. case FF_TRANSPOSE_IDCT_PERM:
  4175. for(i=0; i<64; i++)
  4176. c->idct_permutation[i]= ((i&7)<<3) | (i>>3);
  4177. break;
  4178. case FF_PARTTRANS_IDCT_PERM:
  4179. for(i=0; i<64; i++)
  4180. c->idct_permutation[i]= (i&0x24) | ((i&3)<<3) | ((i>>3)&3);
  4181. break;
  4182. case FF_SSE2_IDCT_PERM:
  4183. for(i=0; i<64; i++)
  4184. c->idct_permutation[i]= (i&0x38) | idct_sse2_row_perm[i&7];
  4185. break;
  4186. default:
  4187. av_log(avctx, AV_LOG_ERROR, "Internal error, IDCT permutation not set\n");
  4188. }
  4189. }