/*
 * Copyright (c) 2016 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <string.h>

#include "checkasm.h"

#include "libavcodec/avcodec.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/h264data.h"

#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem_internal.h"
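
/* Masks that keep random pixel values within the legal range for each bit
 * depth: index 0 is 8-bit, 1 is 9-bit, 2 is 10-bit samples. The _lf masks
 * used by the loop filter tests alternate full-range and small-valued
 * samples, presumably so that both the filtered and the skipped paths of
 * the deblocking filter get exercised. */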
static const uint32_t pixel_mask[3]    = { 0xffffffff, 0x01ff01ff, 0x03ff03ff };
static const uint32_t pixel_mask_lf[3] = { 0xff0fff0f, 0x01ff000f, 0x03ff000f };

#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
#define SIZEOF_COEF  (2 * ((bit_depth + 7) / 8))
#define PIXEL_STRIDE 16
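
/* Fill src and dst with random pixels of the current bit depth, and set
 * coef to the per-pixel difference src - dst, so that transforming coef
 * and applying an idct*_add to dst reconstructs src. Relies on sz,
 * bit_depth, src, dst and coef existing in the calling scope. */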
#define randomize_buffers()                                              \
    do {                                                                 \
        int x, y;                                                        \
        uint32_t mask = pixel_mask[bit_depth - 8];                       \
        for (y = 0; y < sz; y++) {                                       \
            for (x = 0; x < PIXEL_STRIDE; x += 4) {                      \
                AV_WN32A(src + y * PIXEL_STRIDE + x, rnd() & mask);      \
                AV_WN32A(dst + y * PIXEL_STRIDE + x, rnd() & mask);      \
            }                                                            \
            for (x = 0; x < sz; x++) {                                   \
                if (bit_depth == 8) {                                    \
                    coef[y * sz + x] = src[y * PIXEL_STRIDE + x] -       \
                                       dst[y * PIXEL_STRIDE + x];        \
                } else {                                                 \
                    ((int32_t *)coef)[y * sz + x] =                      \
                        ((uint16_t *)src)[y * (PIXEL_STRIDE/2) + x] -    \
                        ((uint16_t *)dst)[y * (PIXEL_STRIDE/2) + x];     \
                }                                                        \
            }                                                            \
        }                                                                \
    } while (0)
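
/* Reference forward 4x4 transform (the counterpart of the decoder's 4x4
 * IDCT), with scaling factors folded in so that the output can be fed
 * directly to the h264_idct*_add functions under test. */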
#define dct4x4_impl(size, dctcoef)                                           \
static void dct4x4_##size(dctcoef *coef)                                     \
{                                                                            \
    int i, y, x;                                                             \
    dctcoef tmp[16];                                                         \
    for (i = 0; i < 4; i++) {                                                \
        const int z0 = coef[i*4 + 0] + coef[i*4 + 3];                        \
        const int z1 = coef[i*4 + 1] + coef[i*4 + 2];                        \
        const int z2 = coef[i*4 + 0] - coef[i*4 + 3];                        \
        const int z3 = coef[i*4 + 1] - coef[i*4 + 2];                        \
        tmp[i + 4*0] = z0 + z1;                                              \
        tmp[i + 4*1] = 2*z2 + z3;                                            \
        tmp[i + 4*2] = z0 - z1;                                              \
        tmp[i + 4*3] = z2 - 2*z3;                                            \
    }                                                                        \
    for (i = 0; i < 4; i++) {                                                \
        const int z0 = tmp[i*4 + 0] + tmp[i*4 + 3];                          \
        const int z1 = tmp[i*4 + 1] + tmp[i*4 + 2];                          \
        const int z2 = tmp[i*4 + 0] - tmp[i*4 + 3];                          \
        const int z3 = tmp[i*4 + 1] - tmp[i*4 + 2];                          \
        coef[i*4 + 0] = z0 + z1;                                             \
        coef[i*4 + 1] = 2*z2 + z3;                                           \
        coef[i*4 + 2] = z0 - z1;                                             \
        coef[i*4 + 3] = z2 - 2*z3;                                           \
    }                                                                        \
    for (y = 0; y < 4; y++) {                                                \
        for (x = 0; x < 4; x++) {                                            \
            static const int scale[] = { 13107 * 10, 8066 * 13, 5243 * 16 }; \
            const int idx = (y & 1) + (x & 1);                               \
            coef[y*4 + x] = (coef[y*4 + x] * scale[idx] + (1 << 14)) >> 15;  \
        }                                                                    \
    }                                                                        \
}
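
/* One 8-point forward transform pass over a row or a column; the source
 * and destination strides select the direction. */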
#define DCT8_1D(src, srcstride, dst, dststride) do {                \
    const int a0 = (src)[srcstride * 0] + (src)[srcstride * 7];     \
    const int a1 = (src)[srcstride * 0] - (src)[srcstride * 7];     \
    const int a2 = (src)[srcstride * 1] + (src)[srcstride * 6];     \
    const int a3 = (src)[srcstride * 1] - (src)[srcstride * 6];     \
    const int a4 = (src)[srcstride * 2] + (src)[srcstride * 5];     \
    const int a5 = (src)[srcstride * 2] - (src)[srcstride * 5];     \
    const int a6 = (src)[srcstride * 3] + (src)[srcstride * 4];     \
    const int a7 = (src)[srcstride * 3] - (src)[srcstride * 4];     \
    const int b0 = a0 + a6;                                         \
    const int b1 = a2 + a4;                                         \
    const int b2 = a0 - a6;                                         \
    const int b3 = a2 - a4;                                         \
    const int b4 = a3 + a5 + (a1 + (a1 >> 1));                      \
    const int b5 = a1 - a7 - (a5 + (a5 >> 1));                      \
    const int b6 = a1 + a7 - (a3 + (a3 >> 1));                      \
    const int b7 = a3 - a5 + (a7 + (a7 >> 1));                      \
    (dst)[dststride * 0] = b0 + b1;                                 \
    (dst)[dststride * 1] = b4 + (b7 >> 2);                          \
    (dst)[dststride * 2] = b2 + (b3 >> 1);                          \
    (dst)[dststride * 3] = b5 + (b6 >> 2);                          \
    (dst)[dststride * 4] = b0 - b1;                                 \
    (dst)[dststride * 5] = b6 - (b5 >> 2);                          \
    (dst)[dststride * 6] = (b2 >> 1) - b3;                          \
    (dst)[dststride * 7] = (b4 >> 2) - b7;                          \
} while (0)
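
/* Reference forward 8x8 transform: a DCT8_1D pass over the columns, a
 * second pass that transposes back, then position-dependent scaling as
 * in dct4x4_impl. */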
#define dct8x8_impl(size, dctcoef)                                  \
static void dct8x8_##size(dctcoef *coef)                            \
{                                                                   \
    int i, x, y;                                                    \
    dctcoef tmp[64];                                                \
    for (i = 0; i < 8; i++)                                         \
        DCT8_1D(coef + i, 8, tmp + i, 8);                           \
                                                                    \
    for (i = 0; i < 8; i++)                                         \
        DCT8_1D(tmp + 8*i, 1, coef + i, 8);                         \
                                                                    \
    for (y = 0; y < 8; y++) {                                       \
        for (x = 0; x < 8; x++) {                                   \
            static const int scale[] = {                            \
                13107 * 20, 11428 * 18, 20972 * 32,                 \
                12222 * 19, 16777 * 25, 15481 * 24,                 \
            };                                                      \
            static const int idxmap[] = {                           \
                0, 3, 4, 3,                                         \
                3, 1, 5, 1,                                         \
                4, 5, 2, 5,                                         \
                3, 1, 5, 1,                                         \
            };                                                      \
            const int idx = idxmap[(y & 3) * 4 + (x & 3)];          \
            coef[y*8 + x] = ((int64_t)coef[y*8 + x] *               \
                             scale[idx] + (1 << 17)) >> 18;         \
        }                                                           \
    }                                                               \
}
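
/* Instantiate int16_t coefficient versions for 8-bit content and int32_t
 * versions for the higher bit depths. */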
dct4x4_impl(16, int16_t)
dct4x4_impl(32, int32_t)
dct8x8_impl(16, int16_t)
dct8x8_impl(32, int32_t)

static void dct4x4(int16_t *coef, int bit_depth)
{
    if (bit_depth == 8)
        dct4x4_16(coef);
    else
        dct4x4_32((int32_t *) coef);
}

static void dct8x8(int16_t *coef, int bit_depth)
{
    if (bit_depth == 8) {
        dct8x8_16(coef);
    } else {
        dct8x8_32((int32_t *) coef);
    }
}
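
/* Check the single-block IDCT-add functions (4x4 and 8x8, full and
 * DC-only) against the C reference, at each supported destination
 * alignment. */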
static void check_idct(void)
{
    LOCAL_ALIGNED_16(uint8_t, src,  [8 * 8 * 2]);
    LOCAL_ALIGNED_16(uint8_t, dst,  [8 * 8 * 2]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [8 * 8 * 2]);
    LOCAL_ALIGNED_16(uint8_t, dst1_base, [8 * 8 * 2 + 32]);
    LOCAL_ALIGNED_16(int16_t, coef, [8 * 8 * 2]);
    LOCAL_ALIGNED_16(int16_t, subcoef0, [8 * 8 * 2]);
    LOCAL_ALIGNED_16(int16_t, subcoef1, [8 * 8 * 2]);
    H264DSPContext h;
    int bit_depth, sz, align, dc;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, int16_t *block, int stride);

    for (bit_depth = 8; bit_depth <= 10; bit_depth++) {
        ff_h264dsp_init(&h, bit_depth, 1);
        for (sz = 4; sz <= 8; sz += 4) {
            randomize_buffers();
            if (sz == 4)
                dct4x4(coef, bit_depth);
            else
                dct8x8(coef, bit_depth);

            for (dc = 0; dc <= 1; dc++) {
                void (*idct)(uint8_t *, int16_t *, int) = NULL;
                switch ((sz << 1) | dc) {
                case (4 << 1) | 0: idct = h.h264_idct_add; break;
                case (4 << 1) | 1: idct = h.h264_idct_dc_add; break;
                case (8 << 1) | 0: idct = h.h264_idct8_add; break;
                case (8 << 1) | 1: idct = h.h264_idct8_dc_add; break;
                }
                if (check_func(idct, "h264_idct%d_add%s_%dbpp", sz, dc ? "_dc" : "", bit_depth)) {
                    for (align = 0; align < 16; align += sz * SIZEOF_PIXEL) {
                        uint8_t *dst1 = dst1_base + align;
                        if (dc) {
                            memset(subcoef0, 0, sz * sz * SIZEOF_COEF);
                            memcpy(subcoef0, coef, SIZEOF_COEF);
                        } else {
                            memcpy(subcoef0, coef, sz * sz * SIZEOF_COEF);
                        }
                        memcpy(dst0, dst, sz * PIXEL_STRIDE);
                        memcpy(dst1, dst, sz * PIXEL_STRIDE);
                        memcpy(subcoef1, subcoef0, sz * sz * SIZEOF_COEF);

                        call_ref(dst0, subcoef0, PIXEL_STRIDE);
                        call_new(dst1, subcoef1, PIXEL_STRIDE);
                        if (memcmp(dst0, dst1, sz * PIXEL_STRIDE) ||
                            memcmp(subcoef0, subcoef1, sz * sz * SIZEOF_COEF))
                            fail();
                        bench_new(dst1, subcoef1, sz * SIZEOF_PIXEL);
                    }
                }
            }
        }
    }
}
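
/* Check the functions that add a full macroblock's worth of 4x4 or 8x8
 * blocks in one call, driven by the block_offset table and the non-zero
 * coefficient counts in nnzc. */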
static void check_idct_multiple(void)
{
    LOCAL_ALIGNED_16(uint8_t, dst_full, [16 * 16 * 2]);
    LOCAL_ALIGNED_16(int16_t, coef_full, [16 * 16 * 2]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [16 * 16 * 2]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [16 * 16 * 2]);
    LOCAL_ALIGNED_16(int16_t, coef0, [16 * 16 * 2]);
    LOCAL_ALIGNED_16(int16_t, coef1, [16 * 16 * 2]);
    LOCAL_ALIGNED_16(uint8_t, nnzc, [15 * 8]);
    H264DSPContext h;
    int bit_depth, i, y, func;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, const int *block_offset, int16_t *block, int stride, const uint8_t nnzc[15 * 8]);

    for (bit_depth = 8; bit_depth <= 10; bit_depth++) {
        ff_h264dsp_init(&h, bit_depth, 1);
        for (func = 0; func < 3; func++) {
            void (*idct)(uint8_t *, const int *, int16_t *, int, const uint8_t[]) = NULL;
            const char *name;
            int sz = 4, intra = 0;
            int block_offset[16] = { 0 };
            switch (func) {
            case 0:
                idct = h.h264_idct_add16;
                name = "h264_idct_add16";
                break;
            case 1:
                idct = h.h264_idct_add16intra;
                name = "h264_idct_add16intra";
                intra = 1;
                break;
            case 2:
                idct = h.h264_idct8_add4;
                name = "h264_idct8_add4";
                sz = 8;
                break;
            }
            memset(nnzc, 0, 15 * 8);
            memset(coef_full, 0, 16 * 16 * SIZEOF_COEF);
            for (i = 0; i < 16 * 16; i += sz * sz) {
                uint8_t src[8 * 8 * 2];
                uint8_t dst[8 * 8 * 2];
                int16_t coef[8 * 8 * 2];
                int index = i / sz;
                int block_y = (index / 16) * sz;
                int block_x = index % 16;
                int offset = (block_y * 16 + block_x) * SIZEOF_PIXEL;
                int nnz = rnd() % 3;

                randomize_buffers();
                if (sz == 4)
                    dct4x4(coef, bit_depth);
                else
                    dct8x8(coef, bit_depth);

                for (y = 0; y < sz; y++)
                    memcpy(&dst_full[offset + y * 16 * SIZEOF_PIXEL],
                           &dst[PIXEL_STRIDE * y], sz * SIZEOF_PIXEL);

                if (nnz > 1)
                    nnz = sz * sz;
                memcpy(&coef_full[i * SIZEOF_COEF / sizeof(coef[0])],
                       coef, nnz * SIZEOF_COEF);
                if (intra && nnz == 1)
                    nnz = 0;
                nnzc[scan8[i / 16]] = nnz;
                block_offset[i / 16] = offset;
            }
            if (check_func(idct, "%s_%dbpp", name, bit_depth)) {
                memcpy(coef0, coef_full, 16 * 16 * SIZEOF_COEF);
                memcpy(coef1, coef_full, 16 * 16 * SIZEOF_COEF);
                memcpy(dst0, dst_full, 16 * 16 * SIZEOF_PIXEL);
                memcpy(dst1, dst_full, 16 * 16 * SIZEOF_PIXEL);
                call_ref(dst0, block_offset, coef0, 16 * SIZEOF_PIXEL, nnzc);
                call_new(dst1, block_offset, coef1, 16 * SIZEOF_PIXEL, nnzc);
                if (memcmp(dst0, dst1, 16 * 16 * SIZEOF_PIXEL) ||
                    memcmp(coef0, coef1, 16 * 16 * SIZEOF_COEF))
                    fail();
                bench_new(dst1, block_offset, coef1, 16 * SIZEOF_PIXEL, nnzc);
            }
        }
    }
}
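
/* Check the tc0-based deblocking filter variants over a sweep of alpha,
 * beta and tc0 thresholds. */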
static void check_loop_filter(void)
{
    LOCAL_ALIGNED_16(uint8_t, dst, [32 * 16 * 2]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [32 * 16 * 2]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [32 * 16 * 2]);
    H264DSPContext h;
    int bit_depth;
    int alphas[36], betas[36];
    int8_t tc0[36][4];
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *pix, ptrdiff_t stride,
                      int alpha, int beta, int8_t *tc0);

    for (bit_depth = 8; bit_depth <= 10; bit_depth++) {
        int i, j, a, c;
        uint32_t mask = pixel_mask_lf[bit_depth - 8];
        ff_h264dsp_init(&h, bit_depth, 1);
        for (i = 35, a = 255, c = 250; i >= 0; i--) {
            alphas[i] = a << (bit_depth - 8);
            betas[i]  = (i + 1) / 2 << (bit_depth - 8);
            tc0[i][0] = tc0[i][3] = (c + 6) / 10;
            tc0[i][1] = (c + 7) / 15;
            tc0[i][2] = (c + 9) / 20;
            a = a * 9 / 10;
            c = c * 9 / 10;
        }

#define CHECK_LOOP_FILTER(name, align, idc)                                    \
        do {                                                                   \
            if (check_func(h.name, #name #idc "_%dbpp", bit_depth)) {          \
                for (j = 0; j < 36; j++) {                                     \
                    intptr_t off = 8 * 32 + (j & 15) * 4 * !align;             \
                    for (i = 0; i < 1024; i += 4) {                            \
                        AV_WN32A(dst + i, rnd() & mask);                       \
                    }                                                          \
                    memcpy(dst0, dst, 32 * 16 * 2);                            \
                    memcpy(dst1, dst, 32 * 16 * 2);                            \
                                                                               \
                    call_ref(dst0 + off, 32, alphas[j], betas[j], tc0[j]);     \
                    call_new(dst1 + off, 32, alphas[j], betas[j], tc0[j]);     \
                    if (memcmp(dst0, dst1, 32 * 16 * SIZEOF_PIXEL)) {          \
                        fprintf(stderr, #name #idc ": j:%d, alpha:%d beta:%d " \
                                "tc0:{%d,%d,%d,%d}\n", j, alphas[j], betas[j], \
                                tc0[j][0], tc0[j][1], tc0[j][2], tc0[j][3]);   \
                        fail();                                                \
                    }                                                          \
                    bench_new(dst1, 32, alphas[j], betas[j], tc0[j]);          \
                }                                                              \
            }                                                                  \
        } while (0)

        CHECK_LOOP_FILTER(h264_v_loop_filter_luma, 1,);
        CHECK_LOOP_FILTER(h264_h_loop_filter_luma, 0,);
        CHECK_LOOP_FILTER(h264_h_loop_filter_luma_mbaff, 0,);
        CHECK_LOOP_FILTER(h264_v_loop_filter_chroma, 1,);
        CHECK_LOOP_FILTER(h264_h_loop_filter_chroma, 0,);
        CHECK_LOOP_FILTER(h264_h_loop_filter_chroma_mbaff, 0,);

        ff_h264dsp_init(&h, bit_depth, 2);
        CHECK_LOOP_FILTER(h264_h_loop_filter_chroma, 0, 422);
        CHECK_LOOP_FILTER(h264_h_loop_filter_chroma_mbaff, 0, 422);
#undef CHECK_LOOP_FILTER
    }
}
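
/* Same sweep as check_loop_filter(), but for the intra deblocking
 * variants, which take only alpha and beta. */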
static void check_loop_filter_intra(void)
{
    LOCAL_ALIGNED_16(uint8_t, dst, [32 * 16 * 2]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [32 * 16 * 2]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [32 * 16 * 2]);
    H264DSPContext h;
    int bit_depth;
    int alphas[36], betas[36];
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *pix, ptrdiff_t stride,
                      int alpha, int beta);

    for (bit_depth = 8; bit_depth <= 10; bit_depth++) {
        int i, j, a;
        uint32_t mask = pixel_mask_lf[bit_depth - 8];
        ff_h264dsp_init(&h, bit_depth, 1);
        for (i = 35, a = 255; i >= 0; i--) {
            alphas[i] = a << (bit_depth - 8);
            betas[i]  = (i + 1) / 2 << (bit_depth - 8);
            a = a * 9 / 10;
        }

#define CHECK_LOOP_FILTER(name, align, idc)                                      \
        do {                                                                     \
            if (check_func(h.name, #name #idc "_%dbpp", bit_depth)) {            \
                for (j = 0; j < 36; j++) {                                       \
                    intptr_t off = 8 * 32 + (j & 15) * 4 * !align;               \
                    for (i = 0; i < 1024; i += 4) {                              \
                        AV_WN32A(dst + i, rnd() & mask);                         \
                    }                                                            \
                    memcpy(dst0, dst, 32 * 16 * 2);                              \
                    memcpy(dst1, dst, 32 * 16 * 2);                              \
                                                                                 \
                    call_ref(dst0 + off, 32, alphas[j], betas[j]);               \
                    call_new(dst1 + off, 32, alphas[j], betas[j]);               \
                    if (memcmp(dst0, dst1, 32 * 16 * SIZEOF_PIXEL)) {            \
                        fprintf(stderr, #name #idc ": j:%d, alpha:%d beta:%d\n", \
                                j, alphas[j], betas[j]);                         \
                        fail();                                                  \
                    }                                                            \
                    bench_new(dst1, 32, alphas[j], betas[j]);                    \
                }                                                                \
            }                                                                    \
        } while (0)

        CHECK_LOOP_FILTER(h264_v_loop_filter_luma_intra, 1,);
        CHECK_LOOP_FILTER(h264_h_loop_filter_luma_intra, 0,);
        CHECK_LOOP_FILTER(h264_h_loop_filter_luma_mbaff_intra, 0,);
        CHECK_LOOP_FILTER(h264_v_loop_filter_chroma_intra, 1,);
        CHECK_LOOP_FILTER(h264_h_loop_filter_chroma_intra, 0,);
        CHECK_LOOP_FILTER(h264_h_loop_filter_chroma_mbaff_intra, 0,);

        ff_h264dsp_init(&h, bit_depth, 2);
        CHECK_LOOP_FILTER(h264_h_loop_filter_chroma_intra, 0, 422);
        CHECK_LOOP_FILTER(h264_h_loop_filter_chroma_mbaff_intra, 0, 422);
#undef CHECK_LOOP_FILTER
    }
}
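
/* Entry point invoked by the checkasm test harness. */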
void checkasm_check_h264dsp(void)
{
    check_idct();
    check_idct_multiple();
    report("idct");

    check_loop_filter();
    report("loop_filter");

    check_loop_filter_intra();
    report("loop_filter_intra");
}