  1. /*
  2. * Copyright (c) 2015 Henrik Gramner
  3. * Copyright (c) 2021 Josh Dekker
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along
  18. * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
  19. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  20. */
#include <string.h>
#include "checkasm.h"
#include "libavcodec/hevcdsp.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"

/* Random-pixel masks, indexed by (bit_depth - 8); one 32-bit word covers two
 * 16-bit pixels (or four 8-bit pixels for the first entry). */
static const uint32_t pixel_mask[] = { 0xffffffff, 0x01ff01ff, 0x03ff03ff, 0x07ff07ff, 0x0fff0fff };
/* Masks for the 16-bit intermediate (src2) reference buffers, same indexing. */
static const uint32_t pixel_mask16[] = { 0x00ff00ff, 0x01ff01ff, 0x03ff03ff, 0x07ff07ff, 0x0fff0fff };
/* Block widths under test; entry 0 is a placeholder (size loops start at 1). */
static const int sizes[] = { -1, 4, 6, 8, 12, 16, 24, 32, 48, 64 };
/* Weighted-prediction parameter tables; each list ends at the first negative. */
static const int weights[] = { 0, 128, 255, -1 };
static const int denoms[] = {0, 7, 12, -1 };
static const int offsets[] = {0, 255, -1 };

/* Bytes per pixel for the bit_depth variable in the enclosing scope. */
#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
#define BUF_SIZE (2 * MAX_PB_SIZE * (2 * 4 + MAX_PB_SIZE))

/* Fill buf0/buf1 with identical masked random pixels and dst0/dst1 with
 * unmasked random garbage; expands in a scope providing those buffers
 * plus bit_depth. */
#define randomize_buffers() \
do { \
    uint32_t mask = pixel_mask[bit_depth - 8]; \
    int k; \
    for (k = 0; k < BUF_SIZE; k += 4) { \
        uint32_t r = rnd() & mask; \
        AV_WN32A(buf0 + k, r); \
        AV_WN32A(buf1 + k, r); \
        r = rnd(); \
        AV_WN32A(dst0 + k, r); \
        AV_WN32A(dst1 + k, r); \
    } \
} while (0)

/* As randomize_buffers(), additionally filling the 16-bit ref0/ref1
 * (second-source) buffers with identical masked random samples. */
#define randomize_buffers_ref() \
    randomize_buffers(); \
    do { \
        uint32_t mask = pixel_mask16[bit_depth - 8]; \
        int k; \
        for (k = 0; k < BUF_SIZE; k += 2) { \
            uint32_t r = rnd() & mask; \
            AV_WN32A(ref0 + k, r); \
            AV_WN32A(ref1 + k, r); \
        } \
    } while (0)

#define src0 (buf0 + 2 * 4 * MAX_PB_SIZE) /* hevc qpel functions read data from negative src pointer offsets */
#define src1 (buf1 + 2 * 4 * MAX_PB_SIZE)
  61. void checkasm_check_hevc_qpel(void)
  62. {
  63. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  64. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  65. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  66. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  67. HEVCDSPContext h;
  68. int size, bit_depth, i, j, row;
  69. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
  70. int height, intptr_t mx, intptr_t my, int width);
  71. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  72. ff_hevc_dsp_init(&h, bit_depth);
  73. for (i = 0; i < 2; i++) {
  74. for (j = 0; j < 2; j++) {
  75. for (size = 1; size < 10; size++) {
  76. const char *type;
  77. switch ((j << 1) | i) {
  78. case 0: type = "pel_pixels"; break; // 0 0
  79. case 1: type = "qpel_h"; break; // 0 1
  80. case 2: type = "qpel_v"; break; // 1 0
  81. case 3: type = "qpel_hv"; break; // 1 1
  82. }
  83. if (check_func(h.put_hevc_qpel[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  84. int16_t *dstw0 = (int16_t *) dst0, *dstw1 = (int16_t *) dst1;
  85. randomize_buffers();
  86. call_ref(dstw0, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  87. call_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  88. for (row = 0; row < size[sizes]; row++) {
  89. if (memcmp(dstw0 + row * MAX_PB_SIZE, dstw1 + row * MAX_PB_SIZE, sizes[size] * SIZEOF_PIXEL))
  90. fail();
  91. }
  92. bench_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  93. }
  94. }
  95. }
  96. }
  97. }
  98. report("qpel");
  99. }
  100. void checkasm_check_hevc_qpel_uni(void)
  101. {
  102. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  103. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  104. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  105. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  106. HEVCDSPContext h;
  107. int size, bit_depth, i, j;
  108. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
  109. int height, intptr_t mx, intptr_t my, int width);
  110. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  111. ff_hevc_dsp_init(&h, bit_depth);
  112. for (i = 0; i < 2; i++) {
  113. for (j = 0; j < 2; j++) {
  114. for (size = 1; size < 10; size++) {
  115. const char *type;
  116. switch ((j << 1) | i) {
  117. case 0: type = "pel_uni_pixels"; break; // 0 0
  118. case 1: type = "qpel_uni_h"; break; // 0 1
  119. case 2: type = "qpel_uni_v"; break; // 1 0
  120. case 3: type = "qpel_uni_hv"; break; // 1 1
  121. }
  122. if (check_func(h.put_hevc_qpel_uni[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  123. randomize_buffers();
  124. call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  125. call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  126. if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
  127. fail();
  128. bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  129. }
  130. }
  131. }
  132. }
  133. }
  134. report("qpel_uni");
  135. }
  136. void checkasm_check_hevc_qpel_uni_w(void)
  137. {
  138. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  139. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  140. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  141. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  142. HEVCDSPContext h;
  143. int size, bit_depth, i, j;
  144. const int *denom, *wx, *ox;
  145. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
  146. int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
  147. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  148. ff_hevc_dsp_init(&h, bit_depth);
  149. for (i = 0; i < 2; i++) {
  150. for (j = 0; j < 2; j++) {
  151. for (size = 1; size < 10; size++) {
  152. const char *type;
  153. switch ((j << 1) | i) {
  154. case 0: type = "pel_uni_w_pixels"; break; // 0 0
  155. case 1: type = "qpel_uni_w_h"; break; // 0 1
  156. case 2: type = "qpel_uni_w_v"; break; // 1 0
  157. case 3: type = "qpel_uni_w_hv"; break; // 1 1
  158. }
  159. if (check_func(h.put_hevc_qpel_uni_w[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  160. for (denom = denoms; *denom >= 0; denom++) {
  161. for (wx = weights; *wx >= 0; wx++) {
  162. for (ox = offsets; *ox >= 0; ox++) {
  163. randomize_buffers();
  164. call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  165. call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  166. if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
  167. fail();
  168. bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  169. }
  170. }
  171. }
  172. }
  173. }
  174. }
  175. }
  176. }
  177. report("qpel_uni_w");
  178. }
  179. void checkasm_check_hevc_qpel_bi(void)
  180. {
  181. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  182. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  183. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  184. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  185. LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
  186. LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
  187. HEVCDSPContext h;
  188. int size, bit_depth, i, j;
  189. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
  190. int16_t *src2,
  191. int height, intptr_t mx, intptr_t my, int width);
  192. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  193. ff_hevc_dsp_init(&h, bit_depth);
  194. for (i = 0; i < 2; i++) {
  195. for (j = 0; j < 2; j++) {
  196. for (size = 1; size < 10; size++) {
  197. const char *type;
  198. switch ((j << 1) | i) {
  199. case 0: type = "pel_bi_pixels"; break; // 0 0
  200. case 1: type = "qpel_bi_h"; break; // 0 1
  201. case 2: type = "qpel_bi_v"; break; // 1 0
  202. case 3: type = "qpel_bi_hv"; break; // 1 1
  203. }
  204. if (check_func(h.put_hevc_qpel_bi[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  205. randomize_buffers_ref();
  206. call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, ref0, sizes[size], i, j, sizes[size]);
  207. call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], i, j, sizes[size]);
  208. if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
  209. fail();
  210. bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], i, j, sizes[size]);
  211. }
  212. }
  213. }
  214. }
  215. }
  216. report("qpel_bi");
  217. }
  218. void checkasm_check_hevc_qpel_bi_w(void)
  219. {
  220. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  221. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  222. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  223. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  224. LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
  225. LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
  226. HEVCDSPContext h;
  227. int size, bit_depth, i, j;
  228. const int *denom, *wx, *ox;
  229. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
  230. int16_t *src2,
  231. int height, int denom, int wx0, int wx1,
  232. int ox0, int ox1, intptr_t mx, intptr_t my, int width);
  233. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  234. ff_hevc_dsp_init(&h, bit_depth);
  235. for (i = 0; i < 2; i++) {
  236. for (j = 0; j < 2; j++) {
  237. for (size = 1; size < 10; size++) {
  238. const char *type;
  239. switch ((j << 1) | i) {
  240. case 0: type = "pel_bi_w_pixels"; break; // 0 0
  241. case 1: type = "qpel_bi_w_h"; break; // 0 1
  242. case 2: type = "qpel_bi_w_v"; break; // 1 0
  243. case 3: type = "qpel_bi_w_hv"; break; // 1 1
  244. }
  245. if (check_func(h.put_hevc_qpel_bi_w[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  246. for (denom = denoms; *denom >= 0; denom++) {
  247. for (wx = weights; *wx >= 0; wx++) {
  248. for (ox = offsets; *ox >= 0; ox++) {
  249. randomize_buffers_ref();
  250. call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, ref0, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  251. call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  252. if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
  253. fail();
  254. bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  255. }
  256. }
  257. }
  258. }
  259. }
  260. }
  261. }
  262. }
  263. report("qpel_bi_w");
  264. }
  265. void checkasm_check_hevc_epel(void)
  266. {
  267. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  268. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  269. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  270. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  271. HEVCDSPContext h;
  272. int size, bit_depth, i, j, row;
  273. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
  274. int height, intptr_t mx, intptr_t my, int width);
  275. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  276. ff_hevc_dsp_init(&h, bit_depth);
  277. for (i = 0; i < 2; i++) {
  278. for (j = 0; j < 2; j++) {
  279. for (size = 1; size < 10; size++) {
  280. const char *type;
  281. switch ((j << 1) | i) {
  282. case 0: type = "pel_pixels"; break; // 0 0
  283. case 1: type = "epel_h"; break; // 0 1
  284. case 2: type = "epel_v"; break; // 1 0
  285. case 3: type = "epel_hv"; break; // 1 1
  286. }
  287. if (check_func(h.put_hevc_epel[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  288. int16_t *dstw0 = (int16_t *) dst0, *dstw1 = (int16_t *) dst1;
  289. randomize_buffers();
  290. call_ref(dstw0, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  291. call_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  292. for (row = 0; row < size[sizes]; row++) {
  293. if (memcmp(dstw0 + row * MAX_PB_SIZE, dstw1 + row * MAX_PB_SIZE, sizes[size] * SIZEOF_PIXEL))
  294. fail();
  295. }
  296. bench_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  297. }
  298. }
  299. }
  300. }
  301. }
  302. report("epel");
  303. }
  304. void checkasm_check_hevc_epel_uni(void)
  305. {
  306. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  307. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  308. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  309. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  310. HEVCDSPContext h;
  311. int size, bit_depth, i, j;
  312. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
  313. int height, intptr_t mx, intptr_t my, int width);
  314. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  315. ff_hevc_dsp_init(&h, bit_depth);
  316. for (i = 0; i < 2; i++) {
  317. for (j = 0; j < 2; j++) {
  318. for (size = 1; size < 10; size++) {
  319. const char *type;
  320. switch ((j << 1) | i) {
  321. case 0: type = "pel_uni_pixels"; break; // 0 0
  322. case 1: type = "epel_uni_h"; break; // 0 1
  323. case 2: type = "epel_uni_v"; break; // 1 0
  324. case 3: type = "epel_uni_hv"; break; // 1 1
  325. }
  326. if (check_func(h.put_hevc_epel_uni[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  327. randomize_buffers();
  328. call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  329. call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  330. if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
  331. fail();
  332. bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  333. }
  334. }
  335. }
  336. }
  337. }
  338. report("epel_uni");
  339. }
  340. void checkasm_check_hevc_epel_uni_w(void)
  341. {
  342. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  343. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  344. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  345. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  346. HEVCDSPContext h;
  347. int size, bit_depth, i, j;
  348. const int *denom, *wx, *ox;
  349. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
  350. int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
  351. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  352. ff_hevc_dsp_init(&h, bit_depth);
  353. for (i = 0; i < 2; i++) {
  354. for (j = 0; j < 2; j++) {
  355. for (size = 1; size < 10; size++) {
  356. const char *type;
  357. switch ((j << 1) | i) {
  358. case 0: type = "pel_uni_w_pixels"; break; // 0 0
  359. case 1: type = "epel_uni_w_h"; break; // 0 1
  360. case 2: type = "epel_uni_w_v"; break; // 1 0
  361. case 3: type = "epel_uni_w_hv"; break; // 1 1
  362. }
  363. if (check_func(h.put_hevc_epel_uni_w[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  364. for (denom = denoms; *denom >= 0; denom++) {
  365. for (wx = weights; *wx >= 0; wx++) {
  366. for (ox = offsets; *ox >= 0; ox++) {
  367. randomize_buffers();
  368. call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  369. call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  370. if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
  371. fail();
  372. bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  373. }
  374. }
  375. }
  376. }
  377. }
  378. }
  379. }
  380. }
  381. report("epel_uni_w");
  382. }
  383. void checkasm_check_hevc_epel_bi(void)
  384. {
  385. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  386. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  387. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  388. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  389. LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
  390. LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
  391. HEVCDSPContext h;
  392. int size, bit_depth, i, j;
  393. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
  394. int16_t *src2,
  395. int height, intptr_t mx, intptr_t my, int width);
  396. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  397. ff_hevc_dsp_init(&h, bit_depth);
  398. for (i = 0; i < 2; i++) {
  399. for (j = 0; j < 2; j++) {
  400. for (size = 1; size < 10; size++) {
  401. const char *type;
  402. switch ((j << 1) | i) {
  403. case 0: type = "pel_bi_pixels"; break; // 0 0
  404. case 1: type = "epel_bi_h"; break; // 0 1
  405. case 2: type = "epel_bi_v"; break; // 1 0
  406. case 3: type = "epel_bi_hv"; break; // 1 1
  407. }
  408. if (check_func(h.put_hevc_epel_bi[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  409. randomize_buffers_ref();
  410. call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, ref0, sizes[size], i, j, sizes[size]);
  411. call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], i, j, sizes[size]);
  412. if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
  413. fail();
  414. bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], i, j, sizes[size]);
  415. }
  416. }
  417. }
  418. }
  419. }
  420. report("epel_bi");
  421. }
  422. void checkasm_check_hevc_epel_bi_w(void)
  423. {
  424. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  425. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  426. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  427. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  428. LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
  429. LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
  430. HEVCDSPContext h;
  431. int size, bit_depth, i, j;
  432. const int *denom, *wx, *ox;
  433. declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
  434. int16_t *src2,
  435. int height, int denom, int wx0, int wx1,
  436. int ox0, int ox1, intptr_t mx, intptr_t my, int width);
  437. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  438. ff_hevc_dsp_init(&h, bit_depth);
  439. for (i = 0; i < 2; i++) {
  440. for (j = 0; j < 2; j++) {
  441. for (size = 1; size < 10; size++) {
  442. const char *type;
  443. switch ((j << 1) | i) {
  444. case 0: type = "pel_bi_w_pixels"; break; // 0 0
  445. case 1: type = "epel_bi_w_h"; break; // 0 1
  446. case 2: type = "epel_bi_w_v"; break; // 1 0
  447. case 3: type = "epel_bi_w_hv"; break; // 1 1
  448. }
  449. if (check_func(h.put_hevc_epel_bi_w[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  450. for (denom = denoms; *denom >= 0; denom++) {
  451. for (wx = weights; *wx >= 0; wx++) {
  452. for (ox = offsets; *ox >= 0; ox++) {
  453. randomize_buffers_ref();
  454. call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, ref0, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  455. call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  456. if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
  457. fail();
  458. bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  459. }
  460. }
  461. }
  462. }
  463. }
  464. }
  465. }
  466. }
  467. report("epel_bi_w");
  468. }