/*
 * MMX and SSE2 optimized snow DSP utils
 * Copyright (c) 2005-2006 Robert Edele <yartrebo@earthlink.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "snow.h"
#include "x86_cpu.h"
void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width){
    const int w2= (width+1)>>1;
    // The aligned SSE2 loads and stores used on temp below require a
    // 16-byte boundary, so temp_buf gets 4 elements of slack and temp is
    // rounded up to the next 16-byte-aligned address.
    DWTELEM temp_buf[(width>>1) + 4];
    DWTELEM * const temp = temp_buf + 4 - (((long)temp_buf & 0xF) >> 2);
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        DWTELEM * const ref = b + w2 - 1;
        DWTELEM b_0 = b[0]; // By allowing b[0] to be computed twice (the
        // first time incorrectly), the SSE2 loop can run one extra pass.
        // The savings in code and time are well worth having to store this
        // value and recalculate b[0] correctly afterwards.
        i = 0;
        asm volatile(
            "pcmpeqd %%xmm7, %%xmm7 \n\t"
            "pslld $31, %%xmm7 \n\t"
            "psrld $29, %%xmm7 \n\t"
            ::);
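        /* Scalar sketch of what the vector loop below computes per element,
         * assuming the W_D* lifting constants from snow.h: xmm7 holds the
         * W_DO rounding offset in each 32-bit lane, the paddd pair that
         * triples the neighbour sum stands in for the multiply by W_DM,
         * and "psrad $3" is the shift by W_DS:
         *
         *     b[i] -= (W_DM*(ref[i] + ref[i+1]) + W_DO) >> W_DS;
         */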
        for(; i<w_l-7; i+=8){
            asm volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 4(%1), %%xmm2 \n\t"
                "movdqu 20(%1), %%xmm6 \n\t"
                "paddd %%xmm1, %%xmm2 \n\t"
                "paddd %%xmm5, %%xmm6 \n\t"
                "movdqa %%xmm2, %%xmm0 \n\t"
                "movdqa %%xmm6, %%xmm4 \n\t"
                "paddd %%xmm2, %%xmm2 \n\t"
                "paddd %%xmm6, %%xmm6 \n\t"
                "paddd %%xmm0, %%xmm2 \n\t"
                "paddd %%xmm4, %%xmm6 \n\t"
                "paddd %%xmm7, %%xmm2 \n\t"
                "paddd %%xmm7, %%xmm6 \n\t"
                "psrad $3, %%xmm2 \n\t"
                "psrad $3, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubd %%xmm2, %%xmm0 \n\t"
                "psubd %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }
    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }
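        /* The loop above peels elements until &dst[i] reaches a 16-byte
         * boundary, making the movdqa accesses in the vector loop legal;
         * the vector loop then applies the same update,
         * dst[i] -= b[i] + b[i+1], to eight elements per iteration. */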
        for(; i<w_r-7; i+=8){
            asm volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 4(%1), %%xmm2 \n\t"
                "movdqu 20(%1), %%xmm6 \n\t"
                "paddd %%xmm1, %%xmm2 \n\t"
                "paddd %%xmm5, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubd %%xmm2, %%xmm0 \n\t"
                "psubd %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }
    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;
        DWTELEM b_0 = b[0];

        i = 0;
        asm volatile(
            "pslld $1, %%xmm7 \n\t"
            ::);
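        /* xmm7, still holding the Lift 0 rounding offset, is doubled here
         * to the liftS offset W_BO (assuming the usual snow.h values).
         * The vector loop below computes
         *
         *     b[i] += (ref[i] + ref[i+1] + 4*b[i] + W_BO) >> W_BS;
         *
         * as ((((sum + W_BO) >> 2) + b[i]) >> 2) + b[i], splitting the
         * shift into two "psrad $2" stages so the intermediate sum stays
         * well within 32 bits; the nested floor divisions make the two
         * forms identical for integer operands. */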
        for(; i<w_l-7; i+=8){
            asm volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 4(%1), %%xmm0 \n\t"
                "movdqu 20(%1), %%xmm4 \n\t" //FIXME try aligned reads and shifts
                "paddd %%xmm1, %%xmm0 \n\t"
                "paddd %%xmm5, %%xmm4 \n\t"
                "paddd %%xmm7, %%xmm0 \n\t"
                "paddd %%xmm7, %%xmm4 \n\t"
                "movdqa (%0), %%xmm1 \n\t"
                "movdqa 16(%0), %%xmm5 \n\t"
                "psrad $2, %%xmm0 \n\t"
                "psrad $2, %%xmm4 \n\t"
                "paddd %%xmm1, %%xmm0 \n\t"
                "paddd %%xmm5, %%xmm4 \n\t"
                "psrad $2, %%xmm0 \n\t"
                "psrad $2, %%xmm4 \n\t"
                "paddd %%xmm1, %%xmm0 \n\t"
                "paddd %%xmm5, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 + ((2 * ref[1] + W_BO-1 + 4 * b_0) >> W_BS);
    }
    { // Lift 3
        DWTELEM * const src = b+w2;

        i = 0;
        for(; (((long)&temp[i]) & 0xF) && i<w_r; i++){
            temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS);
        }
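        /* Lift 3 writes to temp[] instead of updating src[] in place: the
         * interleave step below overwrites the upper half of b while it is
         * still reading the lift-3 results, so they must live in a separate
         * buffer. Per element, the vector loop below computes
         * temp[i] = src[i] + sum + (sum >> 1) with sum = b[i] + b[i+1],
         * i.e. src[i] + ((3*sum) >> 1). */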
        for(; i<w_r-7; i+=8){
            asm volatile(
                "movdqu 4(%1), %%xmm2 \n\t"
                "movdqu 20(%1), %%xmm6 \n\t"
                "paddd (%1), %%xmm2 \n\t"
                "paddd 16(%1), %%xmm6 \n\t"
                "movdqu (%0), %%xmm0 \n\t"
                "movdqu 16(%0), %%xmm4 \n\t"
                "paddd %%xmm2, %%xmm0 \n\t"
                "paddd %%xmm6, %%xmm4 \n\t"
                "psrad $1, %%xmm2 \n\t"
                "psrad $1, %%xmm6 \n\t"
                "paddd %%xmm0, %%xmm2 \n\t"
                "paddd %%xmm4, %%xmm6 \n\t"
                "movdqa %%xmm2, (%2) \n\t"
                "movdqa %%xmm6, 16(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }
    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x1E) != 0x1E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
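        /* Interleave: the final line is b[2k] = b[k] (low-pass) and
         * b[2k+1] = temp[k] (high-pass). Walking i downward guarantees each
         * b[i>>1] is read before the store to b[i] can clobber it; the
         * scalar loop above peels pairs until i is 30 (mod 32), so the
         * punpckldq/punpckhdq block below can emit 32 outputs per iteration
         * from aligned 16-byte loads. */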
        for (i-=30; i>=0; i-=32){
            asm volatile(
                "movdqa (%1), %%xmm0 \n\t"
                "movdqa 16(%1), %%xmm2 \n\t"
                "movdqa 32(%1), %%xmm4 \n\t"
                "movdqa 48(%1), %%xmm6 \n\t"
                "movdqa (%1), %%xmm1 \n\t"
                "movdqa 16(%1), %%xmm3 \n\t"
                "movdqa 32(%1), %%xmm5 \n\t"
                "movdqa 48(%1), %%xmm7 \n\t"
                "punpckldq (%2), %%xmm0 \n\t"
                "punpckldq 16(%2), %%xmm2 \n\t"
                "punpckldq 32(%2), %%xmm4 \n\t"
                "punpckldq 48(%2), %%xmm6 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm2, 32(%0) \n\t"
                "movdqa %%xmm4, 64(%0) \n\t"
                "movdqa %%xmm6, 96(%0) \n\t"
                "punpckhdq (%2), %%xmm1 \n\t"
                "punpckhdq 16(%2), %%xmm3 \n\t"
                "punpckhdq 32(%2), %%xmm5 \n\t"
                "punpckhdq 48(%2), %%xmm7 \n\t"
                "movdqa %%xmm1, 16(%0) \n\t"
                "movdqa %%xmm3, 48(%0) \n\t"
                "movdqa %%xmm5, 80(%0) \n\t"
                "movdqa %%xmm7, 112(%0) \n\t"
                :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1])
                : "memory"
            );
        }
    }
}
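
/* The MMX version below mirrors the SSE2 path with 64-bit registers: four
 * coefficients per pass instead of eight, and no alignment peeling since
 * movq loads and stores carry no alignment requirement. b[0] is also
 * special-cased up front (i starts at 1) instead of via the
 * compute-twice trick used above. */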
void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width){
    const int w2= (width+1)>>1;
    DWTELEM temp[width >> 1];
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        DWTELEM * const ref = b + w2 - 1;

        i = 1;
        b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
        asm volatile(
            "pcmpeqd %%mm7, %%mm7 \n\t"
            "pslld $31, %%mm7 \n\t"
            "psrld $29, %%mm7 \n\t"
            ::);
        for(; i<w_l-3; i+=4){
            asm volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddd 4(%1), %%mm2 \n\t"
                "paddd 12(%1), %%mm6 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm6, %%mm4 \n\t"
                "paddd %%mm2, %%mm2 \n\t"
                "paddd %%mm6, %%mm6 \n\t"
                "paddd %%mm0, %%mm2 \n\t"
                "paddd %%mm4, %%mm6 \n\t"
                "paddd %%mm7, %%mm2 \n\t"
                "paddd %%mm7, %%mm6 \n\t"
                "psrad $3, %%mm2 \n\t"
                "psrad $3, %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubd %%mm2, %%mm0 \n\t"
                "psubd %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
    }
    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
        for(; i<w_r-3; i+=4){
            asm volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddd 4(%1), %%mm2 \n\t"
                "paddd 12(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubd %%mm2, %%mm0 \n\t"
                "psubd %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }
    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;

        i = 1;
        b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS);
        asm volatile(
            "pslld $1, %%mm7 \n\t"
            ::);
        for(; i<w_l-3; i+=4){
            asm volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm4 \n\t"
                "paddd 4(%1), %%mm0 \n\t"
                "paddd 12(%1), %%mm4 \n\t"
                "paddd %%mm7, %%mm0 \n\t"
                "paddd %%mm7, %%mm4 \n\t"
                "psrad $2, %%mm0 \n\t"
                "psrad $2, %%mm4 \n\t"
                "movq (%0), %%mm1 \n\t"
                "movq 8(%0), %%mm5 \n\t"
                "paddd %%mm1, %%mm0 \n\t"
                "paddd %%mm5, %%mm4 \n\t"
                "psrad $2, %%mm0 \n\t"
                "psrad $2, %%mm4 \n\t"
                "paddd %%mm1, %%mm0 \n\t"
                "paddd %%mm5, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
    }
    { // Lift 3
        DWTELEM * const src = b+w2;

        i = 0;
        for(; i<w_r-3; i+=4){
            asm volatile(
                "movq 4(%1), %%mm2 \n\t"
                "movq 12(%1), %%mm6 \n\t"
                "paddd (%1), %%mm2 \n\t"
                "paddd 8(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "paddd %%mm2, %%mm0 \n\t"
                "paddd %%mm6, %%mm4 \n\t"
                "psrad $1, %%mm2 \n\t"
                "psrad $1, %%mm6 \n\t"
                "paddd %%mm0, %%mm2 \n\t"
                "paddd %%mm4, %%mm6 \n\t"
                "movq %%mm2, (%2) \n\t"
                "movq %%mm6, 8(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }
    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0xE) != 0xE; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=14; i>=0; i-=16){
            asm volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm2 \n\t"
                "movq 16(%1), %%mm4 \n\t"
                "movq 24(%1), %%mm6 \n\t"
                "movq (%1), %%mm1 \n\t"
                "movq 8(%1), %%mm3 \n\t"
                "movq 16(%1), %%mm5 \n\t"
                "movq 24(%1), %%mm7 \n\t"
                "punpckldq (%2), %%mm0 \n\t"
                "punpckldq 8(%2), %%mm2 \n\t"
                "punpckldq 16(%2), %%mm4 \n\t"
                "punpckldq 24(%2), %%mm6 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm2, 16(%0) \n\t"
                "movq %%mm4, 32(%0) \n\t"
                "movq %%mm6, 48(%0) \n\t"
                "punpckhdq (%2), %%mm1 \n\t"
                "punpckhdq 8(%2), %%mm3 \n\t"
                "punpckhdq 16(%2), %%mm5 \n\t"
                "punpckhdq 24(%2), %%mm7 \n\t"
                "movq %%mm1, 8(%0) \n\t"
                "movq %%mm3, 24(%0) \n\t"
                "movq %%mm5, 40(%0) \n\t"
                "movq %%mm7, 56(%0) \n\t"
                :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1])
                : "memory"
            );
        }
    }
}
#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
        ""op" (%%"r",%%"REG_d",4), %%"t0" \n\t"\
        ""op" 16(%%"r",%%"REG_d",4), %%"t1" \n\t"\
        ""op" 32(%%"r",%%"REG_d",4), %%"t2" \n\t"\
        ""op" 48(%%"r",%%"REG_d",4), %%"t3" \n\t"

#define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3)

#define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_load_add("paddd",r,t0,t1,t2,t3)

#define snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
        "psubd %%"s0", %%"t0" \n\t"\
        "psubd %%"s1", %%"t1" \n\t"\
        "psubd %%"s2", %%"t2" \n\t"\
        "psubd %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\
        "movdqa %%"s0", (%%"w",%%"REG_d",4) \n\t"\
        "movdqa %%"s1", 16(%%"w",%%"REG_d",4) \n\t"\
        "movdqa %%"s2", 32(%%"w",%%"REG_d",4) \n\t"\
        "movdqa %%"s3", 48(%%"w",%%"REG_d",4) \n\t"

#define snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)\
        "psrad $"n", %%"t0" \n\t"\
        "psrad $"n", %%"t1" \n\t"\
        "psrad $"n", %%"t2" \n\t"\
        "psrad $"n", %%"t3" \n\t"

#define snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
        "paddd %%"s0", %%"t0" \n\t"\
        "paddd %%"s1", %%"t1" \n\t"\
        "paddd %%"s2", %%"t2" \n\t"\
        "paddd %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\
        "movdqa %%"s0", %%"t0" \n\t"\
        "movdqa %%"s1", %%"t1" \n\t"\
        "movdqa %%"s2", %%"t2" \n\t"\
        "movdqa %%"s3", %%"t3" \n\t"
void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width){
    long i = width;

    while(i & 0xF)
    {
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
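    /* The scalar loop above trims the tail until the remaining width is a
     * multiple of 16. The asm block below then steps i downward 16
     * coefficients at a time, applying the same four lifting steps per
     * iteration; the C statements above double as the reference for what
     * the vector code computes. */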
    asm volatile (
        "jmp 2f \n\t"
        "1: \n\t"

        "mov %6, %%"REG_a" \n\t"
        "mov %4, %%"REG_S" \n\t"

        snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_r2r_add("xmm0","xmm2","xmm4","xmm6","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")

        "pcmpeqd %%xmm1, %%xmm1 \n\t"
        "pslld $31, %%xmm1 \n\t"
        "psrld $29, %%xmm1 \n\t"
        "mov %5, %%"REG_a" \n\t"

        snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_sra("3","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_load(REG_a,"xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_sub("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_store(REG_a,"xmm1","xmm3","xmm5","xmm7")
        "mov %3, %%"REG_c" \n\t"
        snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_c,"xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_store(REG_S,"xmm0","xmm2","xmm4","xmm6")
        "mov %2, %%"REG_a" \n\t"
        snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_sra("2","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6")

        "pcmpeqd %%xmm1, %%xmm1 \n\t"
        "pslld $31, %%xmm1 \n\t"
        "psrld $30, %%xmm1 \n\t"
        "mov %1, %%"REG_S" \n\t"

        snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_sra("2","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_store(REG_c,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_S,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_sra("1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_store(REG_a,"xmm0","xmm2","xmm4","xmm6")

        "2: \n\t"
        "sub $16, %%"REG_d" \n\t"
        "jge 1b \n\t"
        :"+d"(i)
        :
        "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
        "%"REG_a"","%"REG_S"","%"REG_c"");
}
#define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\
        ""op" (%%"r",%%"REG_d",4), %%"t0" \n\t"\
        ""op" 8(%%"r",%%"REG_d",4), %%"t1" \n\t"\
        ""op" 16(%%"r",%%"REG_d",4), %%"t2" \n\t"\
        ""op" 24(%%"r",%%"REG_d",4), %%"t3" \n\t"

#define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\
        snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\
        snow_vertical_compose_mmx_load_add("paddd",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\
        "movq %%"s0", (%%"w",%%"REG_d",4) \n\t"\
        "movq %%"s1", 8(%%"w",%%"REG_d",4) \n\t"\
        "movq %%"s2", 16(%%"w",%%"REG_d",4) \n\t"\
        "movq %%"s3", 24(%%"w",%%"REG_d",4) \n\t"

#define snow_vertical_compose_mmx_sra(n,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\
        "movq %%"s0", %%"t0" \n\t"\
        "movq %%"s1", %%"t1" \n\t"\
        "movq %%"s2", %%"t2" \n\t"\
        "movq %%"s3", %%"t3" \n\t"
void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width){
    long i = width;

    while(i & 0x7)
    {
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
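    /* Same tail trimming as the SSE2 version, but to a multiple of 8 for
     * the 8-coefficients-per-iteration MMX loop below. */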
    asm volatile(
        "jmp 2f \n\t"
        "1: \n\t"

        "mov %6, %%"REG_a" \n\t"
        "mov %4, %%"REG_S" \n\t"

        snow_vertical_compose_mmx_load(REG_S,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_r2r_add("mm0","mm2","mm4","mm6","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")

        "pcmpeqd %%mm1, %%mm1 \n\t"
        "pslld $31, %%mm1 \n\t"
        "psrld $29, %%mm1 \n\t"
        "mov %5, %%"REG_a" \n\t"

        snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_sra("3","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_load(REG_a,"mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_sub("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_store(REG_a,"mm1","mm3","mm5","mm7")
        "mov %3, %%"REG_c" \n\t"
        snow_vertical_compose_mmx_load(REG_S,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_c,"mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store(REG_S,"mm0","mm2","mm4","mm6")
        "mov %2, %%"REG_a" \n\t"
        snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_sra("2","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_c,"mm0","mm2","mm4","mm6")

        "pcmpeqd %%mm1, %%mm1 \n\t"
        "pslld $31, %%mm1 \n\t"
        "psrld $30, %%mm1 \n\t"
        "mov %1, %%"REG_S" \n\t"

        snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_sra("2","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_c,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store(REG_c,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_S,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_sra("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store(REG_a,"mm0","mm2","mm4","mm6")

        "2: \n\t"
        "sub $8, %%"REG_d" \n\t"
        "jge 1b \n\t"
        :"+d"(i)
        :
        "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
        "%"REG_a"","%"REG_S"","%"REG_c"");
}
#define snow_inner_add_yblock_sse2_header \
        DWTELEM * * dst_array = sb->line + src_y;\
        long tmp;\
        asm volatile(\
             "mov %7, %%"REG_c" \n\t"\
             "mov %6, %2 \n\t"\
             "mov %4, %%"REG_S" \n\t"\
             "pxor %%xmm7, %%xmm7 \n\t" /* 0 */\
             "pcmpeqd %%xmm3, %%xmm3 \n\t"\
             "pslld $31, %%xmm3 \n\t"\
             "psrld $24, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\
             "1: \n\t"\
             "mov %1, %%"REG_D" \n\t"\
             "mov (%%"REG_D"), %%"REG_D" \n\t"\
             "add %3, %%"REG_D" \n\t"
#define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\
             "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
             "movq (%%"REG_d"), %%"out_reg1" \n\t"\
             "movq (%%"REG_d", %%"REG_c"), %%"out_reg2" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
             "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
             "movq "s_offset"+16(%%"REG_S"), %%xmm4 \n\t"\
             "punpcklbw %%xmm7, %%xmm0 \n\t"\
             "punpcklbw %%xmm7, %%xmm4 \n\t"\
             "pmullw %%xmm0, %%"out_reg1" \n\t"\
             "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\
             "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
             "movq (%%"REG_d"), %%"out_reg1" \n\t"\
             "movq 8(%%"REG_d"), %%"out_reg2" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
             "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
             "movq "s_offset"+8(%%"REG_S"), %%xmm4 \n\t"\
             "punpcklbw %%xmm7, %%xmm0 \n\t"\
             "punpcklbw %%xmm7, %%xmm4 \n\t"\
             "pmullw %%xmm0, %%"out_reg1" \n\t"\
             "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_accum_8(ptr_offset, s_offset) \
             snow_inner_add_yblock_sse2_start_8("xmm2", "xmm6", ptr_offset, s_offset)\
             "paddusw %%xmm2, %%xmm1 \n\t"\
             "paddusw %%xmm6, %%xmm5 \n\t"

#define snow_inner_add_yblock_sse2_accum_16(ptr_offset, s_offset) \
             snow_inner_add_yblock_sse2_start_16("xmm2", "xmm6", ptr_offset, s_offset)\
             "paddusw %%xmm2, %%xmm1 \n\t"\
             "paddusw %%xmm6, %%xmm5 \n\t"

#define snow_inner_add_yblock_sse2_end_common1\
             "add $32, %%"REG_S" \n\t"\
             "add %%"REG_c", %0 \n\t"\
             "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
             "add %%"REG_c", (%%"REG_a") \n\t"

#define snow_inner_add_yblock_sse2_end_common2\
             "jnz 1b \n\t"\
             :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
             :\
             "rm"((long)(src_x<<2)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\
             "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");

#define snow_inner_add_yblock_sse2_end_8\
             "sal $1, %%"REG_c" \n\t"\
             "add $"PTR_SIZE"*2, %1 \n\t"\
             snow_inner_add_yblock_sse2_end_common1\
             "sar $1, %%"REG_c" \n\t"\
             "sub $2, %2 \n\t"\
             snow_inner_add_yblock_sse2_end_common2

#define snow_inner_add_yblock_sse2_end_16\
             "add $"PTR_SIZE"*1, %1 \n\t"\
             snow_inner_add_yblock_sse2_end_common1\
             "dec %2 \n\t"\
             snow_inner_add_yblock_sse2_end_common2
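
/* What the kernels below compute per output pixel (a sketch of the macro
 * expansion): four OBMC-weighted block taps are accumulated in 16 bits,
 *
 *     t = obmc[x]*block[3][x] + obmc[x+O]*block[2][x]
 *       + obmc[x+V]*block[1][x] + obmc[x+V+O]*block[0][x]
 *
 * with O and V the horizontal and vertical quadrant offsets of the OBMC
 * table (hence the s_offset constants 8/128/136 for a 16x16 table and
 * 16/512/528 for a 32x32 one). The sum is widened to 32 bits, added to the
 * IDWT coefficients already in the slice buffer, rounded, shifted down by
 * FRAC_BITS and packed back to bytes with unsigned saturation. */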
static void inner_add_yblock_bw_8_obmc_16_bh_even_sse2(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                                                       int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
snow_inner_add_yblock_sse2_header
snow_inner_add_yblock_sse2_start_8("xmm1", "xmm5", "3", "0")
snow_inner_add_yblock_sse2_accum_8("2", "8")
snow_inner_add_yblock_sse2_accum_8("1", "128")
snow_inner_add_yblock_sse2_accum_8("0", "136")

             "mov %0, %%"REG_d" \n\t"
             "movdqa (%%"REG_D"), %%xmm0 \n\t"
             "movdqa %%xmm1, %%xmm2 \n\t"
             "punpckhwd %%xmm7, %%xmm1 \n\t"
             "punpcklwd %%xmm7, %%xmm2 \n\t"
             "paddd %%xmm2, %%xmm0 \n\t"
             "movdqa 16(%%"REG_D"), %%xmm2 \n\t"
             "paddd %%xmm1, %%xmm2 \n\t"
             "paddd %%xmm3, %%xmm0 \n\t"
             "paddd %%xmm3, %%xmm2 \n\t"

             "mov %1, %%"REG_D" \n\t"
             "mov "PTR_SIZE"(%%"REG_D"), %%"REG_D";\n\t"
             "add %3, %%"REG_D" \n\t"

             "movdqa (%%"REG_D"), %%xmm4 \n\t"
             "movdqa %%xmm5, %%xmm6 \n\t"
             "punpckhwd %%xmm7, %%xmm5 \n\t"
             "punpcklwd %%xmm7, %%xmm6 \n\t"
             "paddd %%xmm6, %%xmm4 \n\t"
             "movdqa 16(%%"REG_D"), %%xmm6 \n\t"
             "paddd %%xmm5, %%xmm6 \n\t"
             "paddd %%xmm3, %%xmm4 \n\t"
             "paddd %%xmm3, %%xmm6 \n\t"

             "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm2 \n\t" /* FRAC_BITS. */
             "packssdw %%xmm2, %%xmm0 \n\t"
             "packuswb %%xmm7, %%xmm0 \n\t"
             "movq %%xmm0, (%%"REG_d") \n\t"

             "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm6 \n\t" /* FRAC_BITS. */
             "packssdw %%xmm6, %%xmm4 \n\t"
             "packuswb %%xmm7, %%xmm4 \n\t"
             "movq %%xmm4, (%%"REG_d",%%"REG_c");\n\t"
snow_inner_add_yblock_sse2_end_8
}
static void inner_add_yblock_bw_16_obmc_32_sse2(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                                                int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
snow_inner_add_yblock_sse2_header
snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0")
snow_inner_add_yblock_sse2_accum_16("2", "16")
snow_inner_add_yblock_sse2_accum_16("1", "512")
snow_inner_add_yblock_sse2_accum_16("0", "528")

             "mov %0, %%"REG_d" \n\t"
             "movdqa %%xmm1, %%xmm0 \n\t"
             "movdqa %%xmm5, %%xmm4 \n\t"
             "punpcklwd %%xmm7, %%xmm0 \n\t"
             "paddd (%%"REG_D"), %%xmm0 \n\t"
             "punpckhwd %%xmm7, %%xmm1 \n\t"
             "paddd 16(%%"REG_D"), %%xmm1 \n\t"
             "punpcklwd %%xmm7, %%xmm4 \n\t"
             "paddd 32(%%"REG_D"), %%xmm4 \n\t"
             "punpckhwd %%xmm7, %%xmm5 \n\t"
             "paddd 48(%%"REG_D"), %%xmm5 \n\t"
             "paddd %%xmm3, %%xmm0 \n\t"
             "paddd %%xmm3, %%xmm1 \n\t"
             "paddd %%xmm3, %%xmm4 \n\t"
             "paddd %%xmm3, %%xmm5 \n\t"

             "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm1 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm5 \n\t" /* FRAC_BITS. */

             "packssdw %%xmm1, %%xmm0 \n\t"
             "packssdw %%xmm5, %%xmm4 \n\t"
             "packuswb %%xmm4, %%xmm0 \n\t"
             "movdqu %%xmm0, (%%"REG_d") \n\t"
snow_inner_add_yblock_sse2_end_16
}
#define snow_inner_add_yblock_mmx_header \
        DWTELEM * * dst_array = sb->line + src_y;\
        long tmp;\
        asm volatile(\
             "mov %7, %%"REG_c" \n\t"\
             "mov %6, %2 \n\t"\
             "mov %4, %%"REG_S" \n\t"\
             "pxor %%mm7, %%mm7 \n\t" /* 0 */\
             "pcmpeqd %%mm3, %%mm3 \n\t"\
             "pslld $31, %%mm3 \n\t"\
             "psrld $24, %%mm3 \n\t" /* FRAC_BITS >> 1 */\
             "1: \n\t"\
             "mov %1, %%"REG_D" \n\t"\
             "mov (%%"REG_D"), %%"REG_D" \n\t"\
             "add %3, %%"REG_D" \n\t"

#define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\
             "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
             "movd "d_offset"(%%"REG_d"), %%"out_reg1" \n\t"\
             "movd "d_offset"+4(%%"REG_d"), %%"out_reg2" \n\t"\
             "punpcklbw %%mm7, %%"out_reg1" \n\t"\
             "punpcklbw %%mm7, %%"out_reg2" \n\t"\
             "movd "s_offset"(%%"REG_S"), %%mm0 \n\t"\
             "movd "s_offset"+4(%%"REG_S"), %%mm4 \n\t"\
             "punpcklbw %%mm7, %%mm0 \n\t"\
             "punpcklbw %%mm7, %%mm4 \n\t"\
             "pmullw %%mm0, %%"out_reg1" \n\t"\
             "pmullw %%mm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \
             snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\
             "paddusw %%mm2, %%mm1 \n\t"\
             "paddusw %%mm6, %%mm5 \n\t"

#define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\
             "mov %0, %%"REG_d" \n\t"\
             "movq %%mm1, %%mm0 \n\t"\
             "movq %%mm5, %%mm4 \n\t"\
             "punpcklwd %%mm7, %%mm0 \n\t"\
             "paddd "read_offset"(%%"REG_D"), %%mm0 \n\t"\
             "punpckhwd %%mm7, %%mm1 \n\t"\
             "paddd "read_offset"+8(%%"REG_D"), %%mm1 \n\t"\
             "punpcklwd %%mm7, %%mm4 \n\t"\
             "paddd "read_offset"+16(%%"REG_D"), %%mm4 \n\t"\
             "punpckhwd %%mm7, %%mm5 \n\t"\
             "paddd "read_offset"+24(%%"REG_D"), %%mm5 \n\t"\
             "paddd %%mm3, %%mm0 \n\t"\
             "paddd %%mm3, %%mm1 \n\t"\
             "paddd %%mm3, %%mm4 \n\t"\
             "paddd %%mm3, %%mm5 \n\t"\
             "psrad $8, %%mm0 \n\t"\
             "psrad $8, %%mm1 \n\t"\
             "psrad $8, %%mm4 \n\t"\
             "psrad $8, %%mm5 \n\t"\
\
             "packssdw %%mm1, %%mm0 \n\t"\
             "packssdw %%mm5, %%mm4 \n\t"\
             "packuswb %%mm4, %%mm0 \n\t"\
             "movq %%mm0, "write_offset"(%%"REG_d") \n\t"

#define snow_inner_add_yblock_mmx_end(s_step)\
             "add $"s_step", %%"REG_S" \n\t"\
             "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
             "add %%"REG_c", (%%"REG_a") \n\t"\
             "add $"PTR_SIZE"*1, %1 \n\t"\
             "add %%"REG_c", %0 \n\t"\
             "dec %2 \n\t"\
             "jnz 1b \n\t"\
             :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
             :\
             "rm"((long)(src_x<<2)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\
             "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");
static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                                              int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
snow_inner_add_yblock_mmx_header
snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
snow_inner_add_yblock_mmx_accum("2", "8", "0")
snow_inner_add_yblock_mmx_accum("1", "128", "0")
snow_inner_add_yblock_mmx_accum("0", "136", "0")
snow_inner_add_yblock_mmx_mix("0", "0")
snow_inner_add_yblock_mmx_end("16")
}

static void inner_add_yblock_bw_16_obmc_32_mmx(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                                               int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
snow_inner_add_yblock_mmx_header
snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
snow_inner_add_yblock_mmx_accum("2", "16", "0")
snow_inner_add_yblock_mmx_accum("1", "512", "0")
snow_inner_add_yblock_mmx_accum("0", "528", "0")
snow_inner_add_yblock_mmx_mix("0", "0")
snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "8", "8")
snow_inner_add_yblock_mmx_accum("2", "24", "8")
snow_inner_add_yblock_mmx_accum("1", "520", "8")
snow_inner_add_yblock_mmx_accum("0", "536", "8")
snow_inner_add_yblock_mmx_mix("32", "8")
snow_inner_add_yblock_mmx_end("32")
}
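
/* Dispatch: the 8-wide SSE2 kernel writes two rows per pass ("sub $2" in
 * snow_inner_add_yblock_sse2_end_8), so odd block heights fall back to the
 * row-at-a-time MMX kernel; block shapes other than 16-wide, or 8-wide
 * with obmc_stride 16, go to the generic C ff_snow_inner_add_yblock(). */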
void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                   int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_sse2(obmc, obmc_stride, block, b_w, b_h, src_x, src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16) {
        if (!(b_h & 1))
            inner_add_yblock_bw_8_obmc_16_bh_even_sse2(obmc, obmc_stride, block, b_w, b_h, src_x, src_y, src_stride, sb, add, dst8);
        else
            inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x, src_y, src_stride, sb, add, dst8);
    } else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x, src_y, src_stride, sb, add, dst8);
}

void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                  int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_mmx(obmc, obmc_stride, block, b_w, b_h, src_x, src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16)
        inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x, src_y, src_stride, sb, add, dst8);
    else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x, src_y, src_stride, sb, add, dst8);
}