/*
 * aligned/packed access motion
 *
 * Copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/hpeldsp.h"
#include "libavcodec/rnd_avg.h"
#include "dsputil_sh4.h"
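
/*
 * LP/LPC access four packed pixels at a time as a 32-bit word; dest is
 * assumed to be 32-bit aligned, while an unaligned ref is handled by the
 * MERGE macros below.
 *
 * UNPACK splits two words of packed bytes into a "high" part (each byte
 * shifted down by two) and a "low" part (the sums of the low two bits),
 * so four pixels can be summed per byte without carries crossing byte
 * lanes. rnd_PACK/no_rnd_PACK recombine two such pairs into the per-byte
 * averages (a+b+c+d+2)>>2 and (a+b+c+d+1)>>2, respectively.
 */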
#define LP(p)  *(uint32_t*)(p)
#define LPC(p) *(const uint32_t*)(p)

#define UNPACK(ph,pl,tt0,tt1) do { \
    uint32_t t0,t1; t0=tt0; t1=tt1; \
    ph = ((t0 & ~BYTE_VEC32(0x03))>>2) + ((t1 & ~BYTE_VEC32(0x03))>>2); \
    pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); } while(0)

#define rnd_PACK(ph,pl,nph,npl)    ph + nph + (((pl + npl + BYTE_VEC32(0x02))>>2) & BYTE_VEC32(0x03))
#define no_rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x01))>>2) & BYTE_VEC32(0x03))
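
/*
 * MERGE1 synthesizes an unaligned 32-bit load at byte offset ofs from two
 * consecutive aligned words a and b; MERGE2 does the same one byte further
 * along, yielding the right-hand neighbour used for interpolation.
 */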
/* little-endian */
#define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) )
#define MERGE2(a,b,ofs) (ofs==3)?b:( ((a)>>(8*(ofs+1)))|((b)<<(32-8*(ofs+1))) )
/* big-endian
#define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)<<(8*ofs))|((b)>>(32-8*ofs)) )
#define MERGE2(a,b,ofs) (ofs==3)?b:( ((a)<<(8+8*ofs))|((b)>>(32-8-8*ofs)) )
*/

#define put(d,s) d = s
#define avg(d,s) d = rnd_avg32(s,d)
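
/*
 * 4-pixel-wide copy/average of one column of rows; OP_C40 is the fast path
 * for a 32-bit aligned source, OP_C4 builds each row from two aligned loads.
 */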
#define OP_C4(ofs) \
    ref -= ofs; \
    do { \
        OP(LP(dest),MERGE1(LPC(ref),LPC(ref+4),ofs)); \
        ref  += stride; \
        dest += stride; \
    } while(--height)

#define OP_C40() \
    do { \
        OP(LP(dest),LPC(ref)); \
        ref  += stride; \
        dest += stride; \
    } while(--height)

#define OP put

static void put_pixels4_c(uint8_t *dest, const uint8_t *ref,
                          const int stride, int height)
{
    switch ((int)ref & 3) {
    case 0: OP_C40(); return;
    case 1: OP_C4(1); return;
    case 2: OP_C4(2); return;
    case 3: OP_C4(3); return;
    }
}

#undef OP
#define OP avg

static void avg_pixels4_c(uint8_t *dest, const uint8_t *ref,
                          const int stride, int height)
{
    switch ((int)ref & 3) {
    case 0: OP_C40(); return;
    case 1: OP_C4(1); return;
    case 2: OP_C4(2); return;
    case 3: OP_C4(3); return;
    }
}

#undef OP
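
/*
 * Row copy/average for 8- and 16-pixel-wide blocks: OP_C handles an
 * unaligned source via MERGE1, OP_C0 below is the aligned fast path.
 */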
#define OP_C(ofs,sz,avg2) \
{ \
    ref -= ofs; \
    do { \
        uint32_t t0,t1; \
        t0 = LPC(ref+0); \
        t1 = LPC(ref+4); \
        OP(LP(dest+0), MERGE1(t0,t1,ofs)); \
        t0 = LPC(ref+8); \
        OP(LP(dest+4), MERGE1(t1,t0,ofs)); \
        if (sz==16) { \
            t1 = LPC(ref+12); \
            OP(LP(dest+8), MERGE1(t0,t1,ofs)); \
            t0 = LPC(ref+16); \
            OP(LP(dest+12), MERGE1(t1,t0,ofs)); \
        } \
        ref  += stride; \
        dest += stride; \
    } while(--height); \
}

/* aligned */
#define OP_C0(sz,avg2) \
{ \
    do { \
        OP(LP(dest+0), LPC(ref+0)); \
        OP(LP(dest+4), LPC(ref+4)); \
        if (sz==16) { \
            OP(LP(dest+8), LPC(ref+8)); \
            OP(LP(dest+12), LPC(ref+12)); \
        } \
        ref  += stride; \
        dest += stride; \
    } while(--height); \
}
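
/*
 * Horizontal half-pel: each output word is avg2() of the source pixels
 * (MERGE1) and their right-hand neighbours (MERGE2).
 */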
#define OP_X(ofs,sz,avg2) \
{ \
    ref -= ofs; \
    do { \
        uint32_t t0,t1; \
        t0 = LPC(ref+0); \
        t1 = LPC(ref+4); \
        OP(LP(dest+0), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
        t0 = LPC(ref+8); \
        OP(LP(dest+4), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
        if (sz==16) { \
            t1 = LPC(ref+12); \
            OP(LP(dest+8), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
            t0 = LPC(ref+16); \
            OP(LP(dest+12), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
        } \
        ref  += stride; \
        dest += stride; \
    } while(--height); \
}
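
/*
 * Vertical half-pel: the previous row is kept in t0..t3 and averaged with
 * each newly loaded row; OP_Y0 handles an aligned source, OP_Y an
 * unaligned one via MERGE1.
 */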
/* aligned */
#define OP_Y0(sz,avg2) \
{ \
    uint32_t t0,t1,t2,t3,t; \
\
    t0 = LPC(ref+0); \
    t1 = LPC(ref+4); \
    if (sz==16) { \
        t2 = LPC(ref+8); \
        t3 = LPC(ref+12); \
    } \
    do { \
        ref += stride; \
\
        t = LPC(ref+0); \
        OP(LP(dest+0), avg2(t0,t)); t0 = t; \
        t = LPC(ref+4); \
        OP(LP(dest+4), avg2(t1,t)); t1 = t; \
        if (sz==16) { \
            t = LPC(ref+8); \
            OP(LP(dest+8), avg2(t2,t)); t2 = t; \
            t = LPC(ref+12); \
            OP(LP(dest+12), avg2(t3,t)); t3 = t; \
        } \
        dest += stride; \
    } while(--height); \
}

#define OP_Y(ofs,sz,avg2) \
{ \
    uint32_t t0,t1,t2,t3,t,w0,w1; \
\
    ref -= ofs; \
    w0 = LPC(ref+0); \
    w1 = LPC(ref+4); \
    t0 = MERGE1(w0,w1,ofs); \
    w0 = LPC(ref+8); \
    t1 = MERGE1(w1,w0,ofs); \
    if (sz==16) { \
        w1 = LPC(ref+12); \
        t2 = MERGE1(w0,w1,ofs); \
        w0 = LPC(ref+16); \
        t3 = MERGE1(w1,w0,ofs); \
    } \
    do { \
        ref += stride; \
\
        w0 = LPC(ref+0); \
        w1 = LPC(ref+4); \
        t = MERGE1(w0,w1,ofs); \
        OP(LP(dest+0), avg2(t0,t)); t0 = t; \
        w0 = LPC(ref+8); \
        t = MERGE1(w1,w0,ofs); \
        OP(LP(dest+4), avg2(t1,t)); t1 = t; \
        if (sz==16) { \
            w1 = LPC(ref+12); \
            t = MERGE1(w0,w1,ofs); \
            OP(LP(dest+8), avg2(t2,t)); t2 = t; \
            w0 = LPC(ref+16); \
            t = MERGE1(w1,w0,ofs); \
            OP(LP(dest+12), avg2(t3,t)); t3 = t; \
        } \
        dest += stride; \
    } while(--height); \
}
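
/*
 * OP_X0/OP_XY0 reuse the general macros with ofs == 0. OP_XY is the 2D
 * half-pel case: each output byte is the average of four source pixels,
 * (a+b+c+d+2)>>2 with rnd_PACK or (a+b+c+d+1)>>2 with no_rnd_PACK; the
 * unpacked values of the previous row are carried in a0..a7.
 */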
#define OP_X0(sz,avg2)  OP_X(0,sz,avg2)
#define OP_XY0(sz,PACK) OP_XY(0,sz,PACK)

#define OP_XY(ofs,sz,PACK) \
{ \
    uint32_t t2,t3,w0,w1; \
    uint32_t a0,a1,a2,a3,a4,a5,a6,a7; \
\
    ref -= ofs; \
    w0 = LPC(ref+0); \
    w1 = LPC(ref+4); \
    UNPACK(a0,a1,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
    w0 = LPC(ref+8); \
    UNPACK(a2,a3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    if (sz==16) { \
        w1 = LPC(ref+12); \
        UNPACK(a4,a5,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        w0 = LPC(ref+16); \
        UNPACK(a6,a7,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    } \
    do { \
        ref += stride; \
        w0 = LPC(ref+0); \
        w1 = LPC(ref+4); \
        UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        OP(LP(dest+0),PACK(a0,a1,t2,t3)); \
        a0 = t2; a1 = t3; \
        w0 = LPC(ref+8); \
        UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
        OP(LP(dest+4),PACK(a2,a3,t2,t3)); \
        a2 = t2; a3 = t3; \
        if (sz==16) { \
            w1 = LPC(ref+12); \
            UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
            OP(LP(dest+8),PACK(a4,a5,t2,t3)); \
            a4 = t2; a5 = t3; \
            w0 = LPC(ref+16); \
            UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
            OP(LP(dest+12),PACK(a6,a7,t2,t3)); \
            a6 = t2; a7 = t3; \
        } \
        dest += stride; \
    } while(--height); \
}
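
/*
 * DEFFUNC emits one function per op/rounding/direction/size combination,
 * dispatching on the low two bits of ref so that unaligned sources take
 * the MERGE-based paths.
 */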
#define DEFFUNC(prefix, op, rnd, xy, sz, OP_N, avgfunc) \
prefix void op##_##rnd##_pixels##sz##_##xy(uint8_t *dest, const uint8_t *ref, \
                                           const ptrdiff_t stride, int height) \
{ \
    switch ((int)ref & 3) { \
    case 0: OP_N##0(sz,rnd##_##avgfunc); return; \
    case 1: OP_N(1,sz,rnd##_##avgfunc); return; \
    case 2: OP_N(2,sz,rnd##_##avgfunc); return; \
    case 3: OP_N(3,sz,rnd##_##avgfunc); return; \
    } \
}

#define OP put
DEFFUNC( ,ff_put,rnd,o,8,OP_C,avg32)
DEFFUNC(static,put, rnd,x,8,OP_X,avg32)
DEFFUNC(static,put,no_rnd,x,8,OP_X,avg32)
DEFFUNC(static,put, rnd,y,8,OP_Y,avg32)
DEFFUNC(static,put,no_rnd,y,8,OP_Y,avg32)
DEFFUNC(static,put, rnd,xy,8,OP_XY,PACK)
DEFFUNC(static,put,no_rnd,xy,8,OP_XY,PACK)
DEFFUNC( ,ff_put,rnd,o,16,OP_C,avg32)
DEFFUNC(static,put, rnd,x,16,OP_X,avg32)
DEFFUNC(static,put,no_rnd,x,16,OP_X,avg32)
DEFFUNC(static,put, rnd,y,16,OP_Y,avg32)
DEFFUNC(static,put,no_rnd,y,16,OP_Y,avg32)
DEFFUNC(static,put, rnd,xy,16,OP_XY,PACK)
DEFFUNC(static,put,no_rnd,xy,16,OP_XY,PACK)
#undef OP

#define OP avg
DEFFUNC( ,ff_avg,rnd,o,8,OP_C,avg32)
DEFFUNC(static,avg, rnd,x,8,OP_X,avg32)
DEFFUNC(static,avg, rnd,y,8,OP_Y,avg32)
DEFFUNC(static,avg, rnd,xy,8,OP_XY,PACK)
DEFFUNC( ,ff_avg,rnd,o,16,OP_C,avg32)
DEFFUNC(static,avg, rnd,x,16,OP_X,avg32)
DEFFUNC(static,avg,no_rnd,x,16,OP_X,avg32)
DEFFUNC(static,avg, rnd,y,16,OP_Y,avg32)
DEFFUNC(static,avg,no_rnd,y,16,OP_Y,avg32)
DEFFUNC(static,avg, rnd,xy,16,OP_XY,PACK)
DEFFUNC(static,avg,no_rnd,xy,16,OP_XY,PACK)
#undef OP
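
/* The full-pel ("o") case does no interpolation and hence no rounding,
 * so the no_rnd variants are simply aliases of the rnd ones. */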
#define ff_put_no_rnd_pixels8_o  ff_put_rnd_pixels8_o
#define ff_put_no_rnd_pixels16_o ff_put_rnd_pixels16_o
#define ff_avg_no_rnd_pixels16_o ff_avg_rnd_pixels16_o

av_cold void ff_hpeldsp_init_sh4(HpelDSPContext *c, int flags)
{
    c->put_pixels_tab[0][0] = ff_put_rnd_pixels16_o;
    c->put_pixels_tab[0][1] = put_rnd_pixels16_x;
    c->put_pixels_tab[0][2] = put_rnd_pixels16_y;
    c->put_pixels_tab[0][3] = put_rnd_pixels16_xy;
    c->put_pixels_tab[1][0] = ff_put_rnd_pixels8_o;
    c->put_pixels_tab[1][1] = put_rnd_pixels8_x;
    c->put_pixels_tab[1][2] = put_rnd_pixels8_y;
    c->put_pixels_tab[1][3] = put_rnd_pixels8_xy;

    c->put_no_rnd_pixels_tab[0][0] = ff_put_no_rnd_pixels16_o;
    c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x;
    c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy;
    c->put_no_rnd_pixels_tab[1][0] = ff_put_no_rnd_pixels8_o;
    c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x;
    c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy;

    c->avg_pixels_tab[0][0] = ff_avg_rnd_pixels16_o;
    c->avg_pixels_tab[0][1] = avg_rnd_pixels16_x;
    c->avg_pixels_tab[0][2] = avg_rnd_pixels16_y;
    c->avg_pixels_tab[0][3] = avg_rnd_pixels16_xy;
    c->avg_pixels_tab[1][0] = ff_avg_rnd_pixels8_o;
    c->avg_pixels_tab[1][1] = avg_rnd_pixels8_x;
    c->avg_pixels_tab[1][2] = avg_rnd_pixels8_y;
    c->avg_pixels_tab[1][3] = avg_rnd_pixels8_xy;

    c->avg_no_rnd_pixels_tab[0] = ff_avg_no_rnd_pixels16_o;
    c->avg_no_rnd_pixels_tab[1] = avg_no_rnd_pixels16_x;
    c->avg_no_rnd_pixels_tab[2] = avg_no_rnd_pixels16_y;
    c->avg_no_rnd_pixels_tab[3] = avg_no_rnd_pixels16_xy;
}