/*
 * Copyright (c) 2015 Shivraj Patil (Shivraj.Patil@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mips/generic_macros_msa.h"
#include "h264dsp_mips.h"
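
/* Vertical prediction: the row of pixels immediately above the block is
 * copied into every row of the destination. */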
static void intra_predict_vert_8x8_msa(uint8_t *src, uint8_t *dst,
                                       int32_t dst_stride)
{
    uint32_t row;
    uint32_t src_data1, src_data2;

    src_data1 = LW(src);
    src_data2 = LW(src + 4);

    for (row = 8; row--;) {
        SW(src_data1, dst);
        SW(src_data2, (dst + 4));

        dst += dst_stride;
    }
}

static void intra_predict_vert_16x16_msa(uint8_t *src, uint8_t *dst,
                                         int32_t dst_stride)
{
    uint32_t row;
    v16u8 src0;

    src0 = LD_UB(src);

    for (row = 16; row--;) {
        ST_UB(src0, dst);
        dst += dst_stride;
    }
}
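
/* Horizontal prediction: each left-neighbour pixel is replicated across its
 * row, either by multiplying the byte by 0x0101010101010101 or by splatting
 * it into a vector with __msa_fill_b(). */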
static void intra_predict_horiz_8x8_msa(uint8_t *src, int32_t src_stride,
                                        uint8_t *dst, int32_t dst_stride)
{
    uint64_t out0, out1, out2, out3, out4, out5, out6, out7;

    out0 = src[0 * src_stride] * 0x0101010101010101;
    out1 = src[1 * src_stride] * 0x0101010101010101;
    out2 = src[2 * src_stride] * 0x0101010101010101;
    out3 = src[3 * src_stride] * 0x0101010101010101;
    out4 = src[4 * src_stride] * 0x0101010101010101;
    out5 = src[5 * src_stride] * 0x0101010101010101;
    out6 = src[6 * src_stride] * 0x0101010101010101;
    out7 = src[7 * src_stride] * 0x0101010101010101;

    SD4(out0, out1, out2, out3, dst, dst_stride);
    dst += (4 * dst_stride);
    SD4(out4, out5, out6, out7, dst, dst_stride);
}

static void intra_predict_horiz_16x16_msa(uint8_t *src, int32_t src_stride,
                                          uint8_t *dst, int32_t dst_stride)
{
    uint32_t row;
    uint8_t inp0, inp1, inp2, inp3;
    v16u8 src0, src1, src2, src3;

    for (row = 4; row--;) {
        inp0 = src[0];
        src += src_stride;
        inp1 = src[0];
        src += src_stride;
        inp2 = src[0];
        src += src_stride;
        inp3 = src[0];
        src += src_stride;

        src0 = (v16u8) __msa_fill_b(inp0);
        src1 = (v16u8) __msa_fill_b(inp1);
        src2 = (v16u8) __msa_fill_b(inp2);
        src3 = (v16u8) __msa_fill_b(inp3);

        ST_UB4(src0, src1, src2, src3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}
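
/* DC prediction: the block is filled with the average of the available
 * neighbours (top row and/or left column), rounded as (sum + n / 2) >> log2(n);
 * with neither edge available it is filled with 128. */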
static void intra_predict_dc_8x8_msa(uint8_t *src_top, uint8_t *src_left,
                                     int32_t src_stride_left,
                                     uint8_t *dst, int32_t dst_stride,
                                     uint8_t is_above, uint8_t is_left)
{
    uint32_t row;
    uint32_t out, addition = 0;
    v16u8 src_above, store;
    v8u16 sum_above;
    v4u32 sum_top;
    v2u64 sum;

    if (is_left && is_above) {
        src_above = LD_UB(src_top);

        sum_above = __msa_hadd_u_h(src_above, src_above);
        sum_top = __msa_hadd_u_w(sum_above, sum_above);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        addition = __msa_copy_u_w((v4i32) sum, 0);

        for (row = 0; row < 8; row++) {
            addition += src_left[row * src_stride_left];
        }

        addition = (addition + 8) >> 4;
        store = (v16u8) __msa_fill_b(addition);
    } else if (is_left) {
        for (row = 0; row < 8; row++) {
            addition += src_left[row * src_stride_left];
        }

        addition = (addition + 4) >> 3;
        store = (v16u8) __msa_fill_b(addition);
    } else if (is_above) {
        src_above = LD_UB(src_top);

        sum_above = __msa_hadd_u_h(src_above, src_above);
        sum_top = __msa_hadd_u_w(sum_above, sum_above);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        sum = (v2u64) __msa_srari_d((v2i64) sum, 3);
        store = (v16u8) __msa_splati_b((v16i8) sum, 0);
    } else {
        store = (v16u8) __msa_ldi_b(128);
    }

    out = __msa_copy_u_w((v4i32) store, 0);

    for (row = 8; row--;) {
        SW(out, dst);
        SW(out, (dst + 4));
        dst += dst_stride;
    }
}

static void intra_predict_dc_16x16_msa(uint8_t *src_top, uint8_t *src_left,
                                       int32_t src_stride_left,
                                       uint8_t *dst, int32_t dst_stride,
                                       uint8_t is_above, uint8_t is_left)
{
    uint32_t row;
    uint32_t addition = 0;
    v16u8 src_above, store;
    v8u16 sum_above;
    v4u32 sum_top;
    v2u64 sum;

    if (is_left && is_above) {
        src_above = LD_UB(src_top);

        sum_above = __msa_hadd_u_h(src_above, src_above);
        sum_top = __msa_hadd_u_w(sum_above, sum_above);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        addition = __msa_copy_u_w((v4i32) sum, 0);

        for (row = 0; row < 16; row++) {
            addition += src_left[row * src_stride_left];
        }

        addition = (addition + 16) >> 5;
        store = (v16u8) __msa_fill_b(addition);
    } else if (is_left) {
        for (row = 0; row < 16; row++) {
            addition += src_left[row * src_stride_left];
        }

        addition = (addition + 8) >> 4;
        store = (v16u8) __msa_fill_b(addition);
    } else if (is_above) {
        src_above = LD_UB(src_top);

        sum_above = __msa_hadd_u_h(src_above, src_above);
        sum_top = __msa_hadd_u_w(sum_above, sum_above);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        sum = (v2u64) __msa_srari_d((v2i64) sum, 4);
        store = (v16u8) __msa_splati_b((v16i8) sum, 0);
    } else {
        store = (v16u8) __msa_ldi_b(128);
    }

    for (row = 16; row--;) {
        ST_UB(store, dst);
        dst += dst_stride;
    }
}
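
/* Constant-value DC predictors: fill the block with the fixed value 127 or
 * 129 (used by the VP8 wrappers at the bottom of this file). */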
#define INTRA_PREDICT_VALDC_8X8_MSA(val)                          \
static void intra_predict_##val##dc_8x8_msa(uint8_t *dst,         \
                                            int32_t dst_stride)   \
{                                                                 \
    uint32_t row, out;                                            \
    v16i8 store;                                                  \
                                                                  \
    store = __msa_ldi_b(val);                                     \
    out = __msa_copy_u_w((v4i32) store, 0);                       \
                                                                  \
    for (row = 8; row--;) {                                       \
        SW(out, dst);                                             \
        SW(out, (dst + 4));                                       \
        dst += dst_stride;                                        \
    }                                                             \
}

INTRA_PREDICT_VALDC_8X8_MSA(127);
INTRA_PREDICT_VALDC_8X8_MSA(129);

#define INTRA_PREDICT_VALDC_16X16_MSA(val)                        \
static void intra_predict_##val##dc_16x16_msa(uint8_t *dst,       \
                                              int32_t dst_stride) \
{                                                                 \
    uint32_t row;                                                 \
    v16u8 store;                                                  \
                                                                  \
    store = (v16u8) __msa_ldi_b(val);                             \
                                                                  \
    for (row = 16; row--;) {                                      \
        ST_UB(store, dst);                                        \
        dst += dst_stride;                                        \
    }                                                             \
}

INTRA_PREDICT_VALDC_16X16_MSA(127);
INTRA_PREDICT_VALDC_16X16_MSA(129);
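
/* Plane prediction: horizontal and vertical gradients are derived from the
 * top and left edge pixels, and a clipped linear ramp is generated across
 * the block. */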
static void intra_predict_plane_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lpcnt;
    int32_t res, res0, res1, res2, res3;
    uint64_t out0, out1;
    v16i8 shf_mask = { 3, 5, 2, 6, 1, 7, 0, 8, 3, 5, 2, 6, 1, 7, 0, 8 };
    v8i16 short_multiplier = { 1, 2, 3, 4, 1, 2, 3, 4 };
    v4i32 int_multiplier = { 0, 1, 2, 3 };
    v16u8 src_top;
    v8i16 vec9, vec10, vec11;
    v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8;
    v2i64 sum;

    src_top = LD_UB(src - (stride + 1));
    src_top = (v16u8) __msa_vshf_b(shf_mask, (v16i8) src_top, (v16i8) src_top);

    vec9 = __msa_hsub_u_h(src_top, src_top);
    vec9 *= short_multiplier;
    vec8 = __msa_hadd_s_w(vec9, vec9);
    sum = __msa_hadd_s_d(vec8, vec8);

    res0 = __msa_copy_s_w((v4i32) sum, 0);

    res1 = (src[4 * stride - 1] - src[2 * stride - 1]) +
           2 * (src[5 * stride - 1] - src[stride - 1]) +
           3 * (src[6 * stride - 1] - src[-1]) +
           4 * (src[7 * stride - 1] - src[-stride - 1]);

    res0 *= 17;
    res1 *= 17;
    res0 = (res0 + 16) >> 5;
    res1 = (res1 + 16) >> 5;

    res3 = 3 * (res0 + res1);
    res2 = 16 * (src[7 * stride - 1] + src[-stride + 7] + 1);
    res = res2 - res3;

    vec8 = __msa_fill_w(res0);
    vec4 = __msa_fill_w(res);
    vec2 = __msa_fill_w(res1);
    vec5 = vec8 * int_multiplier;
    vec3 = vec8 * 4;

    for (lpcnt = 4; lpcnt--;) {
        vec0 = vec5;
        vec0 += vec4;
        vec1 = vec0 + vec3;
        vec6 = vec5;
        vec4 += vec2;
        vec6 += vec4;
        vec7 = vec6 + vec3;

        SRA_4V(vec0, vec1, vec6, vec7, 5);
        PCKEV_H2_SH(vec1, vec0, vec7, vec6, vec10, vec11);
        CLIP_SH2_0_255(vec10, vec11);
        PCKEV_B2_SH(vec10, vec10, vec11, vec11, vec10, vec11);

        out0 = __msa_copy_s_d((v2i64) vec10, 0);
        out1 = __msa_copy_s_d((v2i64) vec11, 0);
        SD(out0, src);
        src += stride;
        SD(out1, src);
        src += stride;

        vec4 += vec2;
    }
}

static void intra_predict_plane_16x16_msa(uint8_t *src, int32_t stride)
{
    uint8_t lpcnt;
    int32_t res0, res1, res2, res3;
    uint64_t load0, load1;
    v16i8 shf_mask = { 7, 8, 6, 9, 5, 10, 4, 11, 3, 12, 2, 13, 1, 14, 0, 15 };
    v8i16 short_multiplier = { 1, 2, 3, 4, 5, 6, 7, 8 };
    v4i32 int_multiplier = { 0, 1, 2, 3 };
    v16u8 src_top = { 0 };
    v8i16 vec9, vec10;
    v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, res_add;

    load0 = LD(src - (stride + 1));
    load1 = LD(src - (stride + 1) + 9);

    INSERT_D2_UB(load0, load1, src_top);

    src_top = (v16u8) __msa_vshf_b(shf_mask, (v16i8) src_top, (v16i8) src_top);

    vec9 = __msa_hsub_u_h(src_top, src_top);
    vec9 *= short_multiplier;
    vec8 = __msa_hadd_s_w(vec9, vec9);
    res_add = (v4i32) __msa_hadd_s_d(vec8, vec8);

    res0 = __msa_copy_s_w(res_add, 0) + __msa_copy_s_w(res_add, 2);

    res1 = (src[8 * stride - 1] - src[6 * stride - 1]) +
           2 * (src[9 * stride - 1] - src[5 * stride - 1]) +
           3 * (src[10 * stride - 1] - src[4 * stride - 1]) +
           4 * (src[11 * stride - 1] - src[3 * stride - 1]) +
           5 * (src[12 * stride - 1] - src[2 * stride - 1]) +
           6 * (src[13 * stride - 1] - src[stride - 1]) +
           7 * (src[14 * stride - 1] - src[-1]) +
           8 * (src[15 * stride - 1] - src[-1 * stride - 1]);

    res0 *= 5;
    res1 *= 5;
    res0 = (res0 + 32) >> 6;
    res1 = (res1 + 32) >> 6;

    res3 = 7 * (res0 + res1);
    res2 = 16 * (src[15 * stride - 1] + src[-stride + 15] + 1);
    res2 -= res3;

    vec8 = __msa_fill_w(res0);
    vec4 = __msa_fill_w(res2);
    vec5 = __msa_fill_w(res1);
    vec6 = vec8 * 4;
    vec7 = vec8 * int_multiplier;

    for (lpcnt = 16; lpcnt--;) {
        vec0 = vec7;
        vec0 += vec4;
        vec1 = vec0 + vec6;
        vec2 = vec1 + vec6;
        vec3 = vec2 + vec6;

        SRA_4V(vec0, vec1, vec2, vec3, 5);
        PCKEV_H2_SH(vec1, vec0, vec3, vec2, vec9, vec10);
        CLIP_SH2_0_255(vec9, vec10);
        PCKEV_ST_SB(vec9, vec10, src);
        src += stride;

        vec4 += vec5;
    }
}
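
/* DC prediction for an 8x8 block handled as four 4x4 quadrants, each
 * averaging the neighbour segments prescribed for H.264 chroma DC mode. */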
static void intra_predict_dc_4blk_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0, src1, src3, src2 = 0;
    uint32_t out0, out1, out2, out3;
    v16u8 src_top;
    v8u16 add;
    v4u32 sum;

    src_top = LD_UB(src - stride);
    add = __msa_hadd_u_h((v16u8) src_top, (v16u8) src_top);
    sum = __msa_hadd_u_w(add, add);
    src0 = __msa_copy_u_w((v4i32) sum, 0);
    src1 = __msa_copy_u_w((v4i32) sum, 1);

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src0 += src[lp_cnt * stride - 1];
        src2 += src[(4 + lp_cnt) * stride - 1];
    }

    src0 = (src0 + 4) >> 3;
    src3 = (src1 + src2 + 4) >> 3;
    src1 = (src1 + 2) >> 2;
    src2 = (src2 + 2) >> 2;

    out0 = src0 * 0x01010101;
    out1 = src1 * 0x01010101;
    out2 = src2 * 0x01010101;
    out3 = src3 * 0x01010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SW(out0, src);
        SW(out1, (src + 4));
        SW(out2, (src + 4 * stride));
        SW(out3, (src + 4 * stride + 4));
        src += stride;
    }
}
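
/* Left-only DC for 8x8: the top half takes the average of left pixels 0..3,
 * the bottom half the average of left pixels 4..7. */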
static void intra_predict_hor_dc_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0 = 0, src1 = 0;
    uint64_t out0, out1;

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src0 += src[lp_cnt * stride - 1];
        src1 += src[(4 + lp_cnt) * stride - 1];
    }

    src0 = (src0 + 2) >> 2;
    src1 = (src1 + 2) >> 2;

    out0 = src0 * 0x0101010101010101;
    out1 = src1 * 0x0101010101010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SD(out0, src);
        SD(out1, (src + 4 * stride));
        src += stride;
    }
}
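
/* Top-only DC for 8x8: the left half takes the average of top pixels 0..3,
 * the right half the average of top pixels 4..7. */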
static void intra_predict_vert_dc_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t out0 = 0, out1 = 0;
    v16u8 src_top;
    v8u16 add;
    v4u32 sum;
    v4i32 res0, res1;

    src_top = LD_UB(src - stride);
    add = __msa_hadd_u_h(src_top, src_top);
    sum = __msa_hadd_u_w(add, add);
    sum = (v4u32) __msa_srari_w((v4i32) sum, 2);
    res0 = (v4i32) __msa_splati_b((v16i8) sum, 0);
    res1 = (v4i32) __msa_splati_b((v16i8) sum, 4);
    out0 = __msa_copy_u_w(res0, 0);
    out1 = __msa_copy_u_w(res1, 0);

    for (lp_cnt = 8; lp_cnt--;) {
        SW(out0, src);
        SW(out1, src + 4);
        src += stride;
    }
}
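
/* The "mad cow" DC variants cover the partial-availability cases of 8x8
 * chroma DC prediction; the suffix encodes which neighbour segments exist,
 * and quadrants with no neighbours fall back to 128 (0x80). */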
static void intra_predict_mad_cow_dc_l0t_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0, src1, src2 = 0;
    uint32_t out0, out1, out2;
    v16u8 src_top;
    v8u16 add;
    v4u32 sum;

    src_top = LD_UB(src - stride);
    add = __msa_hadd_u_h(src_top, src_top);
    sum = __msa_hadd_u_w(add, add);
    src0 = __msa_copy_u_w((v4i32) sum, 0);
    src1 = __msa_copy_u_w((v4i32) sum, 1);

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src2 += src[lp_cnt * stride - 1];
    }
    src2 = (src0 + src2 + 4) >> 3;
    src0 = (src0 + 2) >> 2;
    src1 = (src1 + 2) >> 2;

    out0 = src0 * 0x01010101;
    out1 = src1 * 0x01010101;
    out2 = src2 * 0x01010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SW(out2, src);
        SW(out1, src + 4);
        SW(out0, src + stride * 4);
        SW(out1, src + stride * 4 + 4);
        src += stride;
    }
}

static void intra_predict_mad_cow_dc_0lt_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0, src1, src2 = 0, src3;
    uint32_t out0, out1, out2, out3;
    v16u8 src_top;
    v8u16 add;
    v4u32 sum;

    src_top = LD_UB(src - stride);
    add = __msa_hadd_u_h(src_top, src_top);
    sum = __msa_hadd_u_w(add, add);
    src0 = __msa_copy_u_w((v4i32) sum, 0);
    src1 = __msa_copy_u_w((v4i32) sum, 1);

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src2 += src[(4 + lp_cnt) * stride - 1];
    }

    src0 = (src0 + 2) >> 2;
    src3 = (src1 + src2 + 4) >> 3;
    src1 = (src1 + 2) >> 2;
    src2 = (src2 + 2) >> 2;

    out0 = src0 * 0x01010101;
    out1 = src1 * 0x01010101;
    out2 = src2 * 0x01010101;
    out3 = src3 * 0x01010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SW(out0, src);
        SW(out1, src + 4);
        SW(out2, src + stride * 4);
        SW(out3, src + stride * 4 + 4);
        src += stride;
    }
}

static void intra_predict_mad_cow_dc_l00_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0 = 0;
    uint64_t out0, out1;

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src0 += src[lp_cnt * stride - 1];
    }
    src0 = (src0 + 2) >> 2;
    out0 = src0 * 0x0101010101010101;
    out1 = 0x8080808080808080;

    for (lp_cnt = 4; lp_cnt--;) {
        SD(out0, src);
        SD(out1, src + stride * 4);
        src += stride;
    }
}

static void intra_predict_mad_cow_dc_0l0_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0 = 0;
    uint64_t out0, out1;

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src0 += src[(4 + lp_cnt) * stride - 1];
    }
    src0 = (src0 + 2) >> 2;

    out0 = 0x8080808080808080;
    out1 = src0 * 0x0101010101010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SD(out0, src);
        SD(out1, src + stride * 4);
        src += stride;
    }
}
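
/* Exported entry points with the (src, stride) signature expected by the
 * H.264 and VP8 DSP function tables; they derive the neighbour pointers
 * from src before calling the workers above. */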
void ff_h264_intra_predict_plane_8x8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_plane_8x8_msa(src, stride);
}

void ff_h264_intra_predict_dc_4blk_8x8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_dc_4blk_8x8_msa(src, stride);
}

void ff_h264_intra_predict_hor_dc_8x8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_hor_dc_8x8_msa(src, stride);
}

void ff_h264_intra_predict_vert_dc_8x8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_vert_dc_8x8_msa(src, stride);
}

void ff_h264_intra_predict_mad_cow_dc_l0t_8x8_msa(uint8_t *src,
                                                  ptrdiff_t stride)
{
    intra_predict_mad_cow_dc_l0t_8x8_msa(src, stride);
}

void ff_h264_intra_predict_mad_cow_dc_0lt_8x8_msa(uint8_t *src,
                                                  ptrdiff_t stride)
{
    intra_predict_mad_cow_dc_0lt_8x8_msa(src, stride);
}

void ff_h264_intra_predict_mad_cow_dc_l00_8x8_msa(uint8_t *src,
                                                  ptrdiff_t stride)
{
    intra_predict_mad_cow_dc_l00_8x8_msa(src, stride);
}

void ff_h264_intra_predict_mad_cow_dc_0l0_8x8_msa(uint8_t *src,
                                                  ptrdiff_t stride)
{
    intra_predict_mad_cow_dc_0l0_8x8_msa(src, stride);
}

void ff_h264_intra_predict_plane_16x16_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_plane_16x16_msa(src, stride);
}

void ff_h264_intra_pred_vert_8x8_msa(uint8_t *src, ptrdiff_t stride)
{
    uint8_t *dst = src;

    intra_predict_vert_8x8_msa(src - stride, dst, stride);
}

void ff_h264_intra_pred_horiz_8x8_msa(uint8_t *src, ptrdiff_t stride)
{
    uint8_t *dst = src;

    intra_predict_horiz_8x8_msa(src - 1, stride, dst, stride);
}

void ff_h264_intra_pred_dc_16x16_msa(uint8_t *src, ptrdiff_t stride)
{
    uint8_t *src_top = src - stride;
    uint8_t *src_left = src - 1;
    uint8_t *dst = src;

    intra_predict_dc_16x16_msa(src_top, src_left, stride, dst, stride, 1, 1);
}

void ff_h264_intra_pred_vert_16x16_msa(uint8_t *src, ptrdiff_t stride)
{
    uint8_t *dst = src;

    intra_predict_vert_16x16_msa(src - stride, dst, stride);
}

void ff_h264_intra_pred_horiz_16x16_msa(uint8_t *src, ptrdiff_t stride)
{
    uint8_t *dst = src;

    intra_predict_horiz_16x16_msa(src - 1, stride, dst, stride);
}

void ff_h264_intra_pred_dc_left_16x16_msa(uint8_t *src, ptrdiff_t stride)
{
    uint8_t *src_top = src - stride;
    uint8_t *src_left = src - 1;
    uint8_t *dst = src;

    intra_predict_dc_16x16_msa(src_top, src_left, stride, dst, stride, 0, 1);
}

void ff_h264_intra_pred_dc_top_16x16_msa(uint8_t *src, ptrdiff_t stride)
{
    uint8_t *src_top = src - stride;
    uint8_t *src_left = src - 1;
    uint8_t *dst = src;

    intra_predict_dc_16x16_msa(src_top, src_left, stride, dst, stride, 1, 0);
}

void ff_h264_intra_pred_dc_128_8x8_msa(uint8_t *src, ptrdiff_t stride)
{
    uint8_t *src_top = src - stride;
    uint8_t *src_left = src - 1;
    uint8_t *dst = src;

    intra_predict_dc_8x8_msa(src_top, src_left, stride, dst, stride, 0, 0);
}

void ff_h264_intra_pred_dc_128_16x16_msa(uint8_t *src, ptrdiff_t stride)
{
    uint8_t *src_top = src - stride;
    uint8_t *src_left = src - 1;
    uint8_t *dst = src;

    intra_predict_dc_16x16_msa(src_top, src_left, stride, dst, stride, 0, 0);
}

void ff_vp8_pred8x8_127_dc_8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_127dc_8x8_msa(src, stride);
}

void ff_vp8_pred8x8_129_dc_8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_129dc_8x8_msa(src, stride);
}

void ff_vp8_pred16x16_127_dc_8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_127dc_16x16_msa(src, stride);
}

void ff_vp8_pred16x16_129_dc_8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_129dc_16x16_msa(src, stride);
}