/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file mpegvideo_common.h
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#ifndef FFMPEG_MPEGVIDEO_COMMON_H
#define FFMPEG_MPEGVIDEO_COMMON_H

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "faandct.h"
#include <limits.h>

int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
void copy_picture(Picture *dst, Picture *src);
/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0
 */
int alloc_picture(MpegEncContext *s, Picture *pic, int shared);

/**
 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_common_defaults(MpegEncContext *s);
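
/**
 * Global motion compensation for the single-warping-point case
 * (pure translation): fills one 16x16 luma block and, unless
 * CODEC_FLAG_GRAY is set, the two 8x8 chroma blocks.
 */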
static inline void gmc1_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t **ref_picture)
{
    uint8_t *ptr;
    int offset, src_x, src_y, linesize, uvlinesize;
    int motion_x, motion_y;
    int emu=0;

    motion_x= s->sprite_offset[0][0];
    motion_y= s->sprite_offset[0][1];
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x =0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y =0;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= s->h_edge_pos - 17
           || (unsigned)src_y >= s->v_edge_pos - 17){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer;
        }
    }

    if((motion_x|motion_y)&7){
        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
    }else{
        int dxy;

        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
        if (s->no_rounding){
            s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }else{
            s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if(ENABLE_GRAY && s->flags&CODEC_FLAG_GRAY) return;

    motion_x= s->sprite_offset[1][0];
    motion_y= s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width>>1);
    if (src_x == s->width>>1)
        motion_x =0;
    src_y = av_clip(src_y, -8, s->height>>1);
    if (src_y == s->height>>1)
        motion_y =0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
           || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    return;
}
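
/**
 * Global motion compensation for the general warped case
 * (real_sprite_warping_points != 1), applying the affine
 * sprite_delta matrix via dsp.gmc() to luma and chroma.
 */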
static inline void gmc_motion(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a= s->sprite_warping_accuracy;
    int ox, oy;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
    oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;

    s->dsp.gmc(dest_y, ptr, linesize, 16,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);
    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
           ox + s->sprite_delta[0][0]*8,
           oy + s->sprite_delta[1][0]*8,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);

    if(ENABLE_GRAY && s->flags&CODEC_FLAG_GRAY) return;

    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;

    ptr = ref_picture[1];
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);

    ptr = ref_picture[2];
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);
}
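
/**
 * Half-pel motion compensation of a single w x h block, with optional
 * edge emulation; returns 1 if the emulated edge buffer was used.
 */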
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int width, int height, int stride,
                              int h_edge_pos, int v_edge_pos,
                              int w, int h, op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy;
    int emu=0;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, width); //FIXME unneeded for emu?
    if (src_x == width)
        dxy &= ~1;
    src_y = av_clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~2;
    src += src_y * stride + src_x;

    if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
        if(   (unsigned)src_x > h_edge_pos - (motion_x&1) - w
           || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                                src_x, src_y<<field_based, h_edge_pos, s->v_edge_pos);
            src= s->edge_emu_buffer;
            emu=1;
        }
    }
    if(field_select)
        src += s->linesize;
    pix_op[dxy](dest, src, stride, h);
    return emu;
}
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;

#if 0
    if(s->quarter_sample)
    {
        motion_x>>=1;
        motion_y>>=1;
    }
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x* 16               + (motion_x >> 1);
    src_y =(s->mb_y<<(4-field_based)) + (motion_y >> 1);

    if (s->out_format == FMT_H263) {
        if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
            mx = (motion_x>>1)|(motion_x&1);
            my = motion_y >>1;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8               + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        }else{
            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x>>1;
            uvsrc_y = src_y>>1;
        }
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvdxy = 0;
        uvsrc_x = s->mb_x*8 + mx;
        uvsrc_y = s->mb_y*8 + my;
    } else {
        if(s->chroma_y_shift){
            mx = motion_x / 2;
            my = motion_y / 2;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8               + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        } else {
            if(s->chroma_x_shift){
                //Chroma422
                mx = motion_x / 2;
                uvdxy = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x* 8           + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                //Chroma444
                uvdxy = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&1) - h){
            if(s->codec_id == CODEC_ID_MPEG2VIDEO ||
               s->codec_id == CODEC_ID_MPEG1VIDEO){
                av_log(s->avctx,AV_LOG_DEBUG,"MPEG motion vector out of boundary\n");
                return ;
            }
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                                src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf   , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                    uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                    uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        pix_op[s->chroma_x_shift][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if((ENABLE_H261_ENCODER || ENABLE_H261_DECODER) && s->out_format == FMT_H261){
        ff_h261_loop_filter(s);
    }
}
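
/**
 * Blends the five 8x8 predictions of one luma block (mid, top, left,
 * right, bottom) with OBMC weights, writing the result to dst.
 */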
//FIXME move to dsputil, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    int x;
    uint8_t * const top   = src[1];
    uint8_t * const left  = src[2];
    uint8_t * const mid   = src[0];
    uint8_t * const right = src[3];
    uint8_t * const bottom= src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x=0;
    OBMC_FILTER (x  , 2, 2, 4, 0, 0);
    OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
    x+= stride;
    OBMC_FILTER (x  , 1, 2, 5, 0, 0);
    OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
    x+= stride;
    OBMC_FILTER4(x  , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
    x+= 2*stride;
    OBMC_FILTER4(x  , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
    x+= 2*stride;
    OBMC_FILTER (x  , 0, 2, 5, 0, 1);
    OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
    x+= stride;
    OBMC_FILTER (x  , 0, 2, 4, 0, 2);
    OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
}
/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2]/* mid top left right bottom*/)
#define MID    0
{
    int i;
    uint8_t *ptr[5];

    assert(s->quarter_sample==0);

    for(i=0; i<5; i++){
        if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
            ptr[i]= ptr[MID];
        }else{
            ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1);
            hpel_motion(s, ptr[i], src, 0, 0,
                        src_x, src_y,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos, s->v_edge_pos,
                        8, 8, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}
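
/**
 * Quarter-pel motion compensation of one macroblock: luma via qpix_op,
 * chroma via half-pel pix_op, with edge emulation and the
 * FF_BUG_QPEL_CHROMA* workarounds for buggy encoders.
 */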
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
    src_x = s->mb_x *  16                 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if(field_based){
        mx= motion_x/2;
        my= motion_y>>1;
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
        static const int rtab[8]= {0,0,1,1,0,0,0,1};
        mx= (motion_x>>1) + rtab[motion_x&7];
        my= (motion_y>>1) + rtab[motion_y&7];
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
        mx= (motion_x>>1)|(motion_x&1);
        my= (motion_y>>1)|(motion_y&1);
    }else{
        mx= motion_x/2;
        my= motion_y/2;
    }
    mx= (mx>>1)|(mx&1);
    my= (my>>1)|(my&1);
    uvdxy= (mx&1) | ((my&1)<<1);
    mx>>=1;
    my>>=1;

    uvsrc_x = s->mb_x *  8                 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&3) - h  ){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                            src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
        ptr_y= s->edge_emu_buffer;
        if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize;
            ff_emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize, 9, 9 + field_based,
                                uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ff_emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9, 9 + field_based,
                                uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf + 16;
        }
    }

    if(!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else{
        if(bottom_field){
            dest_y += s->linesize;
            dest_cb+= s->uvlinesize;
            dest_cr+= s->uvlinesize;
        }

        if(field_select){
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        //damn interlaced mode
        //FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y  , ptr_y  , linesize);
        qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize);
    }
    if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
/**
 * h263 chroma 4mv motion compensation.
 */
static inline void chroma_4mv_motion(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     op_pixels_func *pix_op,
                                     int mx, int my){
    int dxy, emu=0, src_x, src_y, offset;
    uint8_t *ptr;

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, s->width/2);
    if (src_x == s->width/2)
        dxy &= ~1;
    src_y = av_clip(src_y, -8, s->height/2);
    if (src_y == s->height/2)
        dxy &= ~2;

    offset = (src_y * (s->uvlinesize)) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
           || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}
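
/**
 * Prefetches reference pixels ahead of the current motion vector so
 * they are already in cache when the actual compensation runs.
 */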
static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir){
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx= (s->mv[dir][0][0]>>shift) + 16*s->mb_x + 8;
    const int my= (s->mv[dir][0][1]>>shift) + 16*s->mb_y;
    int off= mx + (my + (s->mb_x&3)*4)*s->linesize + 64;
    s->dsp.prefetch(pix[0]+off, s->linesize, 4);
    off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
    s->dsp.prefetch(pix[1]+off, pix[2]-pix[1], 2);
}
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
{
    int dxy, mx, my, src_x, src_y, motion_x, motion_y;
    int mb_x, mb_y, i;
    uint8_t *ptr, *dest;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if(s->obmc && s->pict_type != B_TYPE){
        int16_t mv_cache[4][4][2];
        const int xy= s->mb_x + s->mb_y*s->mb_stride;
        const int mot_stride= s->b8_stride;
        const int mot_xy= mb_x*2 + mb_y*2*mot_stride;

        assert(!s->mb_skipped);

        memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy           ], sizeof(int16_t)*4);
        memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
        memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);

        if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
            memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
        }else{
            memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
        }

        if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
            *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
        }else{
            *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
        }

        if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
            *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
        }else{
            *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
        }

        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            const int x= (i&1)+1;
            const int y= (i>>1)+1;
            int16_t mv[5][2]= {
                {mv_cache[y][x  ][0], mv_cache[y][x  ][1]},
                {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
                {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
                {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
                {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};
            //FIXME cleanup
            obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                        pix_op[1],
                        mv);

            mx += mv[0][0];
            my += mv[0][1];
        }
        if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);

        return;
    }

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        if(s->mcsel){
            if(s->real_sprite_warping_points==1){
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            }else{
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        }else if(s->quarter_sample){
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }else if(ENABLE_WMV2 && s->mspel){
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }else
        {
            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }
        break;
    case MV_TYPE_8X8:
        mx = 0;
        my = 0;
        if(s->quarter_sample){
            for(i=0;i<4;i++) {
                motion_x = s->mv[dir][i][0];
                motion_y = s->mv[dir][i][1];

                dxy = ((motion_y & 3) << 2) | (motion_x & 3);
                src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
                src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;

                /* WARNING: do not forget half pels */
                src_x = av_clip(src_x, -16, s->width);
                if (src_x == s->width)
                    dxy &= ~3;
                src_y = av_clip(src_y, -16, s->height);
                if (src_y == s->height)
                    dxy &= ~12;

                ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
                if(s->flags&CODEC_FLAG_EMU_EDGE){
                    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8
                       || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
                        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
                        ptr= s->edge_emu_buffer;
                    }
                }
                dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
                qpix_op[1][dxy](dest, ptr, s->linesize);

                mx += s->mv[dir][i][0]/2;
                my += s->mv[dir][i][1]/2;
            }
        }else{
            for(i=0;i<4;i++) {
                hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                            ref_picture[0], 0, 0,
                            mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                            s->width, s->height, s->linesize,
                            s->h_edge_pos, s->v_edge_pos,
                            8, 8, pix_op[1],
                            s->mv[dir][i][0], s->mv[dir][i][1]);

                mx += s->mv[dir][i][0];
                my += s->mv[dir][i][1];
            }
        }

        if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if(s->quarter_sample){
                for(i=0; i<2; i++){
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
                }
            }else{
                /* top field */
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            1, 0, s->field_select[dir][0],
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 8);
                /* bottom field */
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            1, 1, s->field_select[dir][1],
                            ref_picture, pix_op,
                            s->mv[dir][1][0], s->mv[dir][1][1], 8);
            }
        } else {
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }
        break;
    case MV_TYPE_16X8:
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16*i, 8);

            dest_y += 16*s->linesize;
            dest_cb+= (16>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (16>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], 8);
                }
                pix_op = s->dsp.avg_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],16);

                // after put we make avg of the same block
                pix_op=s->dsp.avg_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}

#endif /* FFMPEG_MPEGVIDEO_COMMON_H */