/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "avcodec.h"
#include "h261.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include <limits.h>

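/* Global motion compensation with a single warping point: one translational
 * sprite offset is applied to the whole macroblock, using s->mdsp.gmc1()
 * for sub-pel positions. */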
static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
{
    uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x   = s->sprite_offset[0][0];
    motion_y   = s->sprite_offset[0][1];
    src_x      = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 linesize, linesize,
                                 17, 17,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->edge_emu_buffer;
    }

    if ((motion_x | motion_y) & 7) {
        s->mdsp.gmc1(dest_y, ptr, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY)
        return;

    motion_x   = s->sprite_offset[1][0];
    motion_y   = s->sprite_offset[1][1];
    src_x      = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}

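/* Global motion compensation for the general case: the per-block motion is
 * derived from s->sprite_offset and s->sprite_delta and handled by
 * s->mdsp.gmc(). */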
static void gmc_motion(MpegEncContext *s,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a = s->sprite_warping_accuracy;
    int ox, oy;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
         s->sprite_delta[0][1] * s->mb_y * 16;
    oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
         s->sprite_delta[1][1] * s->mb_y * 16;
    s->mdsp.gmc(dest_y, ptr, linesize, 16,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);
    s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
                ox + s->sprite_delta[0][0] * 8,
                oy + s->sprite_delta[1][0] * 8,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);

    if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY)
        return;

    ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
         s->sprite_delta[0][1] * s->mb_y * 8;
    oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
         s->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);

    ptr = ref_picture[2];
    s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
}

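/* Half-pel motion compensation of one 8x8 luma block; returns 1 if the
 * edge emulation buffer was used, 0 otherwise. */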
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
                              op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy = 0;
    int emu = 0;

    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
    src += src_y * s->linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest, src, s->linesize, 8);
    return emu;
}

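/* Worker shared by mpeg_motion() and mpeg_motion_field(): motion-compensates
 * one 16xh luma block plus chroma for frame-based or field-based prediction;
 * is_mpeg12 selects whether the H.261/H.263 chroma special cases apply. */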
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y,
                          uint8_t *dest_cb,
                          uint8_t *dest_cr,
                          int field_based,
                          int bottom_field,
                          int field_select,
                          uint8_t **ref_picture,
                          op_pixels_func (*pix_op)[4],
                          int motion_x,
                          int motion_y,
                          int h,
                          int is_mpeg12,
                          int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t uvlinesize, linesize;

#if 0
    if (s->quarter_sample) {
        motion_x >>= 1;
        motion_y >>= 1;
    }
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
            mx      = (motion_x >> 1) | (motion_x & 1);
            my      = motion_y >> 1;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            uvdxy   = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x >> 1;
            uvsrc_y = src_y >> 1;
        }
    // Even chroma mv's are full pel in H261
    } else if (!is_mpeg12 && s->out_format == FMT_H261) {
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvdxy   = 0;
        uvsrc_x = s->mb_x * 8 + mx;
        uvsrc_y = mb_y * 8 + my;
    } else {
        if (s->chroma_y_shift) {
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma422
                mx      = motion_x / 2;
                uvdxy   = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x * 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                // Chroma444
                uvdxy   = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15, 0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 1) - h + 1, 0)) {
        if (is_mpeg12 ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x,
                   src_y);
            return;
        }
        src_y = (unsigned)src_y << field_based;
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
            uvsrc_y = (unsigned)uvsrc_y << field_based;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
}

/* apply one mpeg motion vector to the three components */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

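/* Same as mpeg_motion(), but for one field of a field-based prediction;
 * bottom_field selects which half of the destination is written. */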
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

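/* Blend the five 8x8 predictions (mid, top, left, right, bottom) into dst
 * using the fixed OBMC weight tables below. */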
// FIXME: SIMDify, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    int x;
    uint8_t *const top    = src[1];
    uint8_t *const left   = src[2];
    uint8_t *const mid    = src[0];
    uint8_t *const right  = src[3];
    uint8_t *const bottom = src[4];

#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x] = (t * top[x] + l * left[x] + m * mid[x] + r * right[x] + b * bottom[x] + 4) >> 3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x,              t, l, m, r, b);\
    OBMC_FILTER(x + 1,          t, l, m, r, b);\
    OBMC_FILTER(x     + stride, t, l, m, r, b);\
    OBMC_FILTER(x + 1 + stride, t, l, m, r, b);

    x = 0;
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2 * stride;
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}

/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2] /* mid top left right bottom */)
#define MID 0
{
    int i;
    uint8_t *ptr[5];

    av_assert2(s->quarter_sample == 0);

    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
            ptr[i] = ptr[MID];
        } else {
            ptr[i] = s->obmc_scratchpad + 8 * (i & 1) +
                     s->linesize * 8 * (i >> 1);
            hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}

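/* Quarter-pel motion compensation of one 16x16 (or 16x8 field) luma block
 * plus chroma, including the FF_BUG_QPEL_CHROMA* workarounds for buggy
 * encoders. */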
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t **ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx  >>= 1;
    my  >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15, 0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 3) - h + 1, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y  += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}

/**
 * H.263 chroma 4MV motion compensation.
 */
static void chroma_4mv_motion(MpegEncContext *s,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func *pix_op,
                              int mx, int my)
{
    uint8_t *ptr;
    int src_x, src_y, dxy, emu = 0;
    ptrdiff_t offset;

    /* In case of 8X8, we construct a single chroma motion vector
     * with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}

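/* Prefetch reference pixels a few macroblocks ahead of the current one to
 * hide memory latency. */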
static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
{
    /* fetch the pixels for the estimated MV 4 macroblocks ahead;
     * optimized for 64-byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx    = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
    const int my    = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
    int off         = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;

    s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
    off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
    s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
}

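/* Overlapped block motion compensation of one macroblock: gather the
 * neighbouring motion vectors into mv_cache, run obmc_motion() on each
 * 8x8 luma block and derive a single chroma vector via chroma_4mv_motion(). */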
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    Picture *cur_frame = &s->current_picture;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    const int xy         = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy     = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    av_assert2(!s->mb_skipped);

    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x      = (i & 1) + 1;
        const int y      = (i >> 1) + 1;
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}

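/* 4MV prediction: one motion vector per 8x8 luma block (half-pel or
 * quarter-pel), with a single derived chroma vector. */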
static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t **ref_picture,
                             qpel_mc_func (*qpix_op)[16],
                             op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *ptr, *dest;

    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            dxy   = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
                (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                         s->linesize, s->linesize,
                                         9, 9,
                                         src_x, src_y,
                                         s->h_edge_pos,
                                         s->v_edge_pos);
                ptr = s->edge_emu_buffer;
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}

/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static av_always_inline void mpv_motion_internal(MpegEncContext *s,
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
                                                 int dir,
                                                 uint8_t **ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16],
                                                 int is_mpeg12)
{
    int i;
    int mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
        return;
    }

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        if (s->mcsel) {
            if (s->real_sprite_warping_points == 1) {
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            } else {
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        } else if (!is_mpeg12 && s->quarter_sample) {
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
            } else {
                /* top field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                /* bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            if ((s->picture_structure != s->field_select[dir][0] + 1 &&
                 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) ||
                !ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if ((s->picture_structure == s->field_select[dir][i] + 1 ||
                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
                ref_picture[0]) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16 * i,
                        8, mb_y >> 1);

            dest_y  += 16 * s->linesize;
            dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++)
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                      j, j ^ i, ref_picture, pix_op,
                                      s->mv[dir][2 * i + j][0],
                                      s->mv[dir][2 * i + j][1], 8, mb_y);
                pix_op = s->hdsp.avg_pixels_tab;
            }
        } else {
            if (!ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            for (i = 0; i < 2; i++) {
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->picture_structure != i + 1,
                            ref_picture, pix_op,
                            s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                            16, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->hdsp.avg_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default: av_assert2(0);
    }
}

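/* Public entry point: dispatch to the MPEG-1/2 or generic variant of
 * mpv_motion_internal() so the is_mpeg12 branches can be resolved at
 * compile time (unless CONFIG_SMALL). */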
void ff_mpv_motion(MpegEncContext *s,
                   uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int dir,
                   uint8_t **ref_picture,
                   op_pixels_func (*pix_op)[4],
                   qpel_mc_func (*qpix_op)[16])
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}