/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Gerard Lantau.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Optimized for ia32 cpus by Nick Kurshev <nickols_k@mail.ru>
 * h263, mpeg1, mpeg2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at>
 */

#include "../dsputil.h"
#include "../mpegvideo.h"
#include "../avcodec.h"
#include "../mangle.h"

extern UINT8 zigzag_end[64];
extern UINT8 zigzag_direct_noperm[64];
extern UINT16 inv_zigzag_direct16[64];
extern UINT32 inverse[256];

#if 0
/* XXX: GL: I don't understand why this function needs optimization
   (it is called only once per frame!), so I disabled it */
void MPV_frame_start(MpegEncContext *s)
{
    if (s->pict_type == B_TYPE) {
        __asm __volatile(
            "movl (%1), %%eax\n\t"
            "movl 4(%1), %%edx\n\t"
            "movl 8(%1), %%ecx\n\t"
            "movl %%eax, (%0)\n\t"
            "movl %%edx, 4(%0)\n\t"
            "movl %%ecx, 8(%0)\n\t"
            :
            :"r"(s->current_picture), "r"(s->aux_picture)
            :"eax","edx","ecx","memory");
    } else {
        /* swap next and last */
        __asm __volatile(
            "movl (%1), %%eax\n\t"
            "movl 4(%1), %%edx\n\t"
            "movl 8(%1), %%ecx\n\t"
            "xchgl (%0), %%eax\n\t"
            "xchgl 4(%0), %%edx\n\t"
            "xchgl 8(%0), %%ecx\n\t"
            "movl %%eax, (%1)\n\t"
            "movl %%edx, 4(%1)\n\t"
            "movl %%ecx, 8(%1)\n\t"
            "movl %%eax, (%2)\n\t"
            "movl %%edx, 4(%2)\n\t"
            "movl %%ecx, 8(%2)\n\t"
            :
            :"r"(s->last_picture), "r"(s->next_picture), "r"(s->current_picture)
            :"eax","edx","ecx","memory");
    }
}
#endif
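
#if 0
/* Plain-C sketch of what the disabled asm above does, given for reference
   only.  It assumes the picture fields are arrays of three UINT8* planes,
   as the 0/4/8 byte offsets in the asm suggest; the function name here is
   purely illustrative and not part of the build. */
static void MPV_frame_start_sketch(MpegEncContext *s)
{
    int i;

    if (s->pict_type == B_TYPE) {
        /* B frames decode into the auxiliary picture */
        for (i = 0; i < 3; i++)
            s->current_picture[i] = s->aux_picture[i];
    } else {
        /* swap next and last; current points at the new next picture */
        for (i = 0; i < 3; i++) {
            UINT8 *tmp = s->last_picture[i];
            s->last_picture[i] = s->next_picture[i];
            s->next_picture[i] = tmp;
            s->current_picture[i] = tmp;
        }
    }
}
#endif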

static const unsigned long long int mm_wabs __attribute__ ((aligned(8))) = 0xffffffffffffffffULL;
static const unsigned long long int mm_wone __attribute__ ((aligned(8))) = 0x0001000100010001ULL;

static void dct_unquantize_h263_mmx(MpegEncContext *s,
                                    DCTELEM *block, int n, int qscale)
{
    int i, level, qmul, qadd, nCoeffs;

    qmul = s->qscale << 1;
    if (s->h263_aic && s->mb_intra)
        qadd = 0;
    else
        qadd = (s->qscale - 1) | 1;

    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                block[0] = block[0] * s->y_dc_scale;
            else
                block[0] = block[0] * s->c_dc_scale;
        }
        for(i=1; i<8; i++) {
            level = block[i];
            if (level) {
                if (level < 0) {
                    level = level * qmul - qadd;
                } else {
                    level = level * qmul + qadd;
                }
                block[i] = level;
            }
        }
        nCoeffs=64;
    } else {
        i = 0;
        nCoeffs= zigzag_end[ s->block_last_index[n] ];
    }
//printf("%d %d ", qmul, qadd);
    asm volatile(
        "movd %1, %%mm6 \n\t" //qmul
        "packssdw %%mm6, %%mm6 \n\t"
        "packssdw %%mm6, %%mm6 \n\t"
        "movd %2, %%mm5 \n\t" //qadd
        "pxor %%mm7, %%mm7 \n\t"
        "packssdw %%mm5, %%mm5 \n\t"
        "packssdw %%mm5, %%mm5 \n\t"
        "psubw %%mm5, %%mm7 \n\t"
        "pxor %%mm4, %%mm4 \n\t"
        ".balign 16\n\t"
        "1: \n\t"
        "movq (%0, %3), %%mm0 \n\t"
        "movq 8(%0, %3), %%mm1 \n\t"
        "pmullw %%mm6, %%mm0 \n\t"
        "pmullw %%mm6, %%mm1 \n\t"
        "movq (%0, %3), %%mm2 \n\t"
        "movq 8(%0, %3), %%mm3 \n\t"
        "pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
        "pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
        "pxor %%mm2, %%mm0 \n\t"
        "pxor %%mm3, %%mm1 \n\t"
        "paddw %%mm7, %%mm0 \n\t"
        "paddw %%mm7, %%mm1 \n\t"
        "pxor %%mm0, %%mm2 \n\t"
        "pxor %%mm1, %%mm3 \n\t"
        "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0
        "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0
        "pandn %%mm2, %%mm0 \n\t"
        "pandn %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%0, %3) \n\t"
        "movq %%mm1, 8(%0, %3) \n\t"
        "addl $16, %3 \n\t"
        "js 1b \n\t"
        ::"r" (block+nCoeffs), "g"(qmul), "g" (qadd), "r" (2*(i-nCoeffs))
        : "memory"
    );
}
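
#if 0
/* Scalar equivalent of the MMX loop above, given as a reference sketch
   (not used by the build): every nonzero coefficient from index i up to
   nCoeffs-1 becomes level*qmul + qadd when positive and level*qmul - qadd
   when negative; zero coefficients stay zero.  This mirrors the C loop
   that dct_unquantize_h263_mmx already uses for the first coefficients. */
static void dct_unquantize_h263_ref(DCTELEM *block, int i, int nCoeffs,
                                    int qmul, int qadd)
{
    int level;

    for (; i < nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0)
                level = level * qmul - qadd;
            else
                level = level * qmul + qadd;
            block[i] = level;
        }
    }
}
#endif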

/*
  NK:
  Note: looking at PARANOID:
  "enable all paranoid tests for rounding, overflows, etc..."

      #ifdef PARANOID
        if (level < -2048 || level > 2047)
            fprintf(stderr, "unquant error %d %d\n", i, level);
      #endif

  We can assume that the result of the two multiplications cannot be
  greater than 0xFFFF, i.e. it fits in 16 bits, so only the PMULLW
  instruction is needed here and a full (complex) multiplication can be
  avoided.
  =====================================================
  Full formula for the multiplication of two integers that are
  represented as high:low word pairs:
    input:  value1 = high1:low1
            value2 = high2:low2
    output: value3 = value1*value2
            value3 = high3:low3 (on overflow: modulo 2^32 wrap-around)
  This means that for 0x123456 * 0x123456 the correct result is
  0x14b66cb0ce4, but this algorithm only computes the low part,
  0x66cb0ce4; it is limited by the 16-bit size of the operands.
  ---------------------------------
    tlow1 = high1*low2
    tlow2 = high2*low1
    tlow1 = tlow1 + tlow2
    high3:low3 = low1*low2
    high3 += tlow1
*/
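
#if 0
/* Worked version of the "full formula" in the note above: a 32x32-bit
   multiply built from 16-bit halves, wrapping modulo 2^32.  Reference
   sketch only; the helper name is illustrative. */
static UINT32 mul32_from_halves(UINT32 value1, UINT32 value2)
{
    UINT32 high1 = value1 >> 16, low1 = value1 & 0xFFFF;
    UINT32 high2 = value2 >> 16, low2 = value2 & 0xFFFF;
    UINT32 tlow1 = high1 * low2;      /* cross products land at bit 16 and up */
    UINT32 tlow2 = high2 * low1;
    UINT32 result = low1 * low2;      /* high3:low3 = low1*low2 */

    result += (tlow1 + tlow2) << 16;  /* high3 += tlow1 + tlow2; the
                                         high1*high2 term falls past bit 31
                                         and is lost */
    return result;                    /* 0x123456 * 0x123456 -> 0x66cb0ce4 */
}
#endif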

static void dct_unquantize_mpeg1_mmx(MpegEncContext *s,
                                     DCTELEM *block, int n, int qscale)
{
    int nCoeffs;
    const UINT16 *quant_matrix;

    if(s->alternate_scan) nCoeffs= 64;
    else nCoeffs= zigzag_end[ s->block_last_index[n] ];

    if (s->mb_intra) {
        int block0;
        if (n < 4)
            block0 = block[0] * s->y_dc_scale;
        else
            block0 = block[0] * s->c_dc_scale;
        /* XXX: only mpeg1 */
        quant_matrix = s->intra_matrix;
        asm volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $15, %%mm7 \n\t"
            "movd %2, %%mm6 \n\t"
            "packssdw %%mm6, %%mm6 \n\t"
            "packssdw %%mm6, %%mm6 \n\t"
            "movl %3, %%eax \n\t"
            ".balign 16\n\t"
            "1: \n\t"
            "movq (%0, %%eax), %%mm0 \n\t"
            "movq 8(%0, %%eax), %%mm1 \n\t"
            "movq (%1, %%eax), %%mm4 \n\t"
            "movq 8(%1, %%eax), %%mm5 \n\t"
            "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
            "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
            "pxor %%mm2, %%mm2 \n\t"
            "pxor %%mm3, %%mm3 \n\t"
            "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
            "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
            "pxor %%mm2, %%mm0 \n\t"
            "pxor %%mm3, %%mm1 \n\t"
            "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
            "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
            "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
            "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
            "pxor %%mm4, %%mm4 \n\t"
            "pxor %%mm5, %%mm5 \n\t" // FIXME slow
            "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
            "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
            "psraw $3, %%mm0 \n\t"
            "psraw $3, %%mm1 \n\t"
            "psubw %%mm7, %%mm0 \n\t"
            "psubw %%mm7, %%mm1 \n\t"
            "por %%mm7, %%mm0 \n\t"
            "por %%mm7, %%mm1 \n\t"
            "pxor %%mm2, %%mm0 \n\t"
            "pxor %%mm3, %%mm1 \n\t"
            "psubw %%mm2, %%mm0 \n\t"
            "psubw %%mm3, %%mm1 \n\t"
            "pandn %%mm0, %%mm4 \n\t"
            "pandn %%mm1, %%mm5 \n\t"
            "movq %%mm4, (%0, %%eax) \n\t"
            "movq %%mm5, 8(%0, %%eax) \n\t"
            "addl $16, %%eax \n\t"
            "js 1b \n\t"
            ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
            : "%eax", "memory"
        );
        block[0]= block0;
    } else {
        quant_matrix = s->inter_matrix;
        asm volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $15, %%mm7 \n\t"
            "movd %2, %%mm6 \n\t"
            "packssdw %%mm6, %%mm6 \n\t"
            "packssdw %%mm6, %%mm6 \n\t"
            "movl %3, %%eax \n\t"
            ".balign 16\n\t"
            "1: \n\t"
            "movq (%0, %%eax), %%mm0 \n\t"
            "movq 8(%0, %%eax), %%mm1 \n\t"
            "movq (%1, %%eax), %%mm4 \n\t"
            "movq 8(%1, %%eax), %%mm5 \n\t"
            "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
            "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
            "pxor %%mm2, %%mm2 \n\t"
            "pxor %%mm3, %%mm3 \n\t"
            "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
            "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
            "pxor %%mm2, %%mm0 \n\t"
            "pxor %%mm3, %%mm1 \n\t"
            "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
            "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
            "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2
            "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2
            "paddw %%mm7, %%mm0 \n\t" // abs(block[i])*2 + 1
            "paddw %%mm7, %%mm1 \n\t" // abs(block[i])*2 + 1
            "pmullw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q
            "pmullw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
            "pxor %%mm4, %%mm4 \n\t"
            "pxor %%mm5, %%mm5 \n\t" // FIXME slow
            "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
            "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
            "psraw $4, %%mm0 \n\t"
            "psraw $4, %%mm1 \n\t"
            "psubw %%mm7, %%mm0 \n\t"
            "psubw %%mm7, %%mm1 \n\t"
            "por %%mm7, %%mm0 \n\t"
            "por %%mm7, %%mm1 \n\t"
            "pxor %%mm2, %%mm0 \n\t"
            "pxor %%mm3, %%mm1 \n\t"
            "psubw %%mm2, %%mm0 \n\t"
            "psubw %%mm3, %%mm1 \n\t"
            "pandn %%mm0, %%mm4 \n\t"
            "pandn %%mm1, %%mm5 \n\t"
            "movq %%mm4, (%0, %%eax) \n\t"
            "movq %%mm5, 8(%0, %%eax) \n\t"
            "addl $16, %%eax \n\t"
            "js 1b \n\t"
            ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
            : "%eax", "memory"
        );
    }
}
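
#if 0
/* Scalar reference for the MMX code above, reconstructed from the asm
   comments (an illustrative sketch, not used by the build).  Intra blocks
   use (|level| * qscale * quant_matrix[i]) >> 3, inter blocks use
   ((2*|level| + 1) * qscale * quant_matrix[i]) >> 4; both are then forced
   odd with (x - 1) | 1 before the sign is put back.  Zero coefficients
   stay zero, and block[0] of intra blocks is handled separately in C. */
static void dct_unquantize_mpeg1_ref(DCTELEM *block, int nCoeffs, int qscale,
                                     const UINT16 *quant_matrix, int intra)
{
    int i, level, sign;

    for (i = 0; i < nCoeffs; i++) {
        level = block[i];
        if (level) {
            sign = level < 0;
            if (sign)
                level = -level;
            if (intra)
                level = (level * qscale * quant_matrix[i]) >> 3;
            else
                level = ((2 * level + 1) * qscale * quant_matrix[i]) >> 4;
            level = (level - 1) | 1;         /* force an odd value */
            block[i] = sign ? -level : level;
        }
    }
}
#endif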

static void dct_unquantize_mpeg2_mmx(MpegEncContext *s,
                                     DCTELEM *block, int n, int qscale)
{
    int nCoeffs;
    const UINT16 *quant_matrix;

    if(s->alternate_scan) nCoeffs= 64;
    else nCoeffs= zigzag_end[ s->block_last_index[n] ];

    if (s->mb_intra) {
        int block0;
        if (n < 4)
            block0 = block[0] * s->y_dc_scale;
        else
            block0 = block[0] * s->c_dc_scale;
        quant_matrix = s->intra_matrix;
        asm volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $15, %%mm7 \n\t"
            "movd %2, %%mm6 \n\t"
            "packssdw %%mm6, %%mm6 \n\t"
            "packssdw %%mm6, %%mm6 \n\t"
            "movl %3, %%eax \n\t"
            ".balign 16\n\t"
            "1: \n\t"
            "movq (%0, %%eax), %%mm0 \n\t"
            "movq 8(%0, %%eax), %%mm1 \n\t"
            "movq (%1, %%eax), %%mm4 \n\t"
            "movq 8(%1, %%eax), %%mm5 \n\t"
            "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
            "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
            "pxor %%mm2, %%mm2 \n\t"
            "pxor %%mm3, %%mm3 \n\t"
            "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
            "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
            "pxor %%mm2, %%mm0 \n\t"
            "pxor %%mm3, %%mm1 \n\t"
            "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
            "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
            "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
            "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
            "pxor %%mm4, %%mm4 \n\t"
            "pxor %%mm5, %%mm5 \n\t" // FIXME slow
            "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
            "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
            "psraw $3, %%mm0 \n\t"
            "psraw $3, %%mm1 \n\t"
            "pxor %%mm2, %%mm0 \n\t"
            "pxor %%mm3, %%mm1 \n\t"
            "psubw %%mm2, %%mm0 \n\t"
            "psubw %%mm3, %%mm1 \n\t"
            "pandn %%mm0, %%mm4 \n\t"
            "pandn %%mm1, %%mm5 \n\t"
            "movq %%mm4, (%0, %%eax) \n\t"
            "movq %%mm5, 8(%0, %%eax) \n\t"
            "addl $16, %%eax \n\t"
            "js 1b \n\t"
            ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
            : "%eax", "memory"
        );
        block[0]= block0;
        //Note, we don't do mismatch control for intra as errors cannot accumulate
    } else {
        quant_matrix = s->inter_matrix;
        asm volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlq $48, %%mm7 \n\t"
            "movd %2, %%mm6 \n\t"
            "packssdw %%mm6, %%mm6 \n\t"
            "packssdw %%mm6, %%mm6 \n\t"
            "movl %3, %%eax \n\t"
            ".balign 16\n\t"
            "1: \n\t"
            "movq (%0, %%eax), %%mm0 \n\t"
            "movq 8(%0, %%eax), %%mm1 \n\t"
            "movq (%1, %%eax), %%mm4 \n\t"
            "movq 8(%1, %%eax), %%mm5 \n\t"
            "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
            "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
            "pxor %%mm2, %%mm2 \n\t"
            "pxor %%mm3, %%mm3 \n\t"
            "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
            "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
            "pxor %%mm2, %%mm0 \n\t"
            "pxor %%mm3, %%mm1 \n\t"
            "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
            "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
            "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2
            "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2
            "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*2*q
            "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*2*q
            "paddw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q
            "paddw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
            "pxor %%mm4, %%mm4 \n\t"
            "pxor %%mm5, %%mm5 \n\t" // FIXME slow
            "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
            "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
            "psrlw $4, %%mm0 \n\t"
            "psrlw $4, %%mm1 \n\t"
            "pxor %%mm2, %%mm0 \n\t"
            "pxor %%mm3, %%mm1 \n\t"
            "psubw %%mm2, %%mm0 \n\t"
            "psubw %%mm3, %%mm1 \n\t"
            "pandn %%mm0, %%mm4 \n\t"
            "pandn %%mm1, %%mm5 \n\t"
            "pxor %%mm4, %%mm7 \n\t"
            "pxor %%mm5, %%mm7 \n\t"
            "movq %%mm4, (%0, %%eax) \n\t"
            "movq %%mm5, 8(%0, %%eax) \n\t"
            "addl $16, %%eax \n\t"
            "js 1b \n\t"
            "movd 124(%0, %3), %%mm0 \n\t"
            "movq %%mm7, %%mm6 \n\t"
            "psrlq $32, %%mm7 \n\t"
            "pxor %%mm6, %%mm7 \n\t"
            "movq %%mm7, %%mm6 \n\t"
            "psrlq $16, %%mm7 \n\t"
            "pxor %%mm6, %%mm7 \n\t"
            "pslld $31, %%mm7 \n\t"
            "psrlq $15, %%mm7 \n\t"
            "pxor %%mm7, %%mm0 \n\t"
            "movd %%mm0, 124(%0, %3) \n\t"
            ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "r" (-2*nCoeffs)
            : "%eax", "memory"
        );
    }
}
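
#if 0
/* Scalar reference for the inter path of the MMX code above (illustrative
   sketch only).  Compared with MPEG-1, MPEG-2 drops the (x - 1) | 1 step
   and instead applies mismatch control: if the sum of all dequantized
   coefficients is even, the LSB of the last coefficient, block[63], is
   toggled; this is what the XOR accumulation in %mm7 implements. */
static void dct_unquantize_mpeg2_inter_ref(DCTELEM *block, int nCoeffs,
                                           int qscale,
                                           const UINT16 *quant_matrix)
{
    int i, level, sign, sum = 0;

    for (i = 0; i < nCoeffs; i++) {
        level = block[i];
        if (level) {
            sign = level < 0;
            if (sign)
                level = -level;
            level = ((2 * level + 1) * qscale * quant_matrix[i]) >> 4;
            block[i] = sign ? -level : level;
        }
        sum += block[i];
    }
    if ((sum & 1) == 0)
        block[63] ^= 1;                      /* mismatch control */
}
#endif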

/* draw the edges of width 'w' of an image of size width, height
   this mmx version can only handle w==8 || w==16 */
static void draw_edges_mmx(UINT8 *buf, int wrap, int width, int height, int w)
{
    UINT8 *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        asm volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "addl %1, %0 \n\t"
            "cmpl %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" (wrap), "r" (width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        asm volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "addl %1, %0 \n\t"
            "cmpl %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" (wrap), "r" (width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        asm volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "addl $8, %0 \n\t"
            "cmpl %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((int)buf - (int)ptr - w), "r" (-wrap), "r" (-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        asm volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "addl $8, %0 \n\t"
            "cmpl %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((int)last_line - (int)ptr - w), "r" (wrap), "r" (wrap*3), "r" (ptr+width+2*w)
        );
    }
}
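
#if 0
/* Scalar sketch of what draw_edges_mmx does, for reference only: replicate
   the leftmost and rightmost pixel of every line into a border of width w,
   then copy the first and last (now widened) lines into the top and bottom
   borders, which also fills the corners. */
static void draw_edges_ref(UINT8 *buf, int wrap, int width, int height, int w)
{
    UINT8 *ptr, *last_line;
    int i, j;

    /* left and right borders */
    for (i = 0; i < height; i++) {
        ptr = buf + i * wrap;
        for (j = 1; j <= w; j++) {
            ptr[-j]            = ptr[0];
            ptr[width - 1 + j] = ptr[width - 1];
        }
    }
    /* top and bottom borders, including the corners */
    last_line = buf + (height - 1) * wrap;
    for (i = 1; i <= w; i++) {
        for (j = 0; j < width + 2 * w; j++) {
            (buf - i * wrap - w)[j]       = (buf - w)[j];
            (last_line + i * wrap - w)[j] = (last_line - w)[j];
        }
    }
}
#endif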

static volatile int esp_temp;

void unused_var_warning_killer(){
    esp_temp++;
}
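
/* dct_quantize is generated twice from the same template below: once as
   dct_quantize_MMX with HAVE_MMX2 undefined and once as dct_quantize_MMX2
   with it defined; RENAME() appends the corresponding suffix. */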
#undef HAVE_MMX2
#define RENAME(a) a ## _MMX
#include "mpegvideo_mmx_template.c"

#define HAVE_MMX2
#undef RENAME
#define RENAME(a) a ## _MMX2
#include "mpegvideo_mmx_template.c"

void MPV_common_init_mmx(MpegEncContext *s)
{
    if (mm_flags & MM_MMX) {
        s->dct_unquantize_h263 = dct_unquantize_h263_mmx;
        s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_mmx;
        s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_mmx;
        draw_edges = draw_edges_mmx;

        if(mm_flags & MM_MMXEXT){
            dct_quantize= dct_quantize_MMX2;
        } else {
            dct_quantize= dct_quantize_MMX;
        }
    }
}