  1. /*
  2. * The simplest mpeg encoder (well, it was the simplest!)
  3. * Copyright (c) 2000,2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of Libav.
  9. *
  10. * Libav is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * Libav is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with Libav; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * The simplest mpeg encoder (well, it was the simplest!).
  27. */
  28. #include "libavutil/intmath.h"
  29. #include "libavutil/imgutils.h"
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "internal.h"
  33. #include "mpegvideo.h"
  34. #include "mpegvideo_common.h"
  35. #include "mjpegenc.h"
  36. #include "msmpeg4.h"
  37. #include "faandct.h"
  38. #include "xvmc_internal.h"
  39. #include "thread.h"
  40. #include <limits.h>
  41. //#undef NDEBUG
  42. //#include <assert.h>
  43. static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
  44. DCTELEM *block, int n, int qscale);
  45. static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
  46. DCTELEM *block, int n, int qscale);
  47. static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
  48. DCTELEM *block, int n, int qscale);
  49. static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
  50. DCTELEM *block, int n, int qscale);
  51. static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
  52. DCTELEM *block, int n, int qscale);
  53. static void dct_unquantize_h263_intra_c(MpegEncContext *s,
  54. DCTELEM *block, int n, int qscale);
  55. static void dct_unquantize_h263_inter_c(MpegEncContext *s,
  56. DCTELEM *block, int n, int qscale);
  57. /* enable all paranoid tests for rounding, overflows, etc... */
  58. //#define PARANOID
  59. //#define DEBUG
  60. static const uint8_t ff_default_chroma_qscale_table[32] = {
  61. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  62. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
  63. 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
  64. };
  65. const uint8_t ff_mpeg1_dc_scale_table[128] = {
  66. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  67. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  68. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  69. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  70. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  71. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  72. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  73. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  74. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  75. };
  76. static const uint8_t mpeg2_dc_scale_table1[128] = {
  77. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  78. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  79. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  80. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  81. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  82. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  83. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  84. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  85. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  86. };
  87. static const uint8_t mpeg2_dc_scale_table2[128] = {
  88. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  89. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  90. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  91. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  92. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  93. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  94. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  95. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  96. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  97. };
  98. static const uint8_t mpeg2_dc_scale_table3[128] = {
  99. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  100. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  101. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  102. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  103. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  104. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  105. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  106. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  107. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  108. };
  109. const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
  110. ff_mpeg1_dc_scale_table,
  111. mpeg2_dc_scale_table1,
  112. mpeg2_dc_scale_table2,
  113. mpeg2_dc_scale_table3,
  114. };
  115. const enum PixelFormat ff_pixfmt_list_420[] = {
  116. PIX_FMT_YUV420P,
  117. PIX_FMT_NONE
  118. };
  119. const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
  120. PIX_FMT_DXVA2_VLD,
  121. PIX_FMT_VAAPI_VLD,
  122. PIX_FMT_VDA_VLD,
  123. PIX_FMT_YUV420P,
  124. PIX_FMT_NONE
  125. };
  126. const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
  127. const uint8_t *end,
  128. uint32_t * restrict state)
  129. {
  130. int i;
  131. assert(p <= end);
  132. if (p >= end)
  133. return end;
  134. for (i = 0; i < 3; i++) {
  135. uint32_t tmp = *state << 8;
  136. *state = tmp + *(p++);
  137. if (tmp == 0x100 || p == end)
  138. return p;
  139. }
  140. while (p < end) {
  141. if (p[-1] > 1 ) p += 3;
  142. else if (p[-2] ) p += 2;
  143. else if (p[-3]|(p[-1]-1)) p++;
  144. else {
  145. p++;
  146. break;
  147. }
  148. }
  149. p = FFMIN(p, end) - 4;
  150. *state = AV_RB32(p);
  151. return p + 4;
  152. }
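/*
 * Illustrative usage sketch (an editorial addition, not part of the original
 * file): callers such as the MPEG-1/2 packet parsers typically scan a buffer
 * with a rolling 32-bit state word. buf, buf_end and handle_start_code() are
 * hypothetical placeholders.
 *
 *     uint32_t state = -1;
 *     const uint8_t *p = buf;
 *     while (p < buf_end) {
 *         p = avpriv_mpv_find_start_code(p, buf_end, &state);
 *         if (state >= 0x100 && state <= 0x1ff)    // a 00 00 01 xx start code
 *             handle_start_code(state & 0xff, p);  // p points just past it
 *     }
 */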
  153. /* init common dct for both encoder and decoder */
  154. av_cold int ff_dct_common_init(MpegEncContext *s)
  155. {
  156. ff_dsputil_init(&s->dsp, s->avctx);
  157. s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
  158. s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
  159. s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
  160. s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
  161. s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
  162. if (s->flags & CODEC_FLAG_BITEXACT)
  163. s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
  164. s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
  165. #if HAVE_MMX
  166. ff_MPV_common_init_mmx(s);
  167. #elif ARCH_ALPHA
  168. ff_MPV_common_init_axp(s);
  169. #elif HAVE_MMI
  170. ff_MPV_common_init_mmi(s);
  171. #elif ARCH_ARM
  172. ff_MPV_common_init_arm(s);
  173. #elif HAVE_ALTIVEC
  174. ff_MPV_common_init_altivec(s);
  175. #elif ARCH_BFIN
  176. ff_MPV_common_init_bfin(s);
  177. #endif
  178. /* load & permutate scantables
  179. * note: only wmv uses different ones
  180. */
  181. if (s->alternate_scan) {
  182. ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
  183. ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
  184. } else {
  185. ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
  186. ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
  187. }
  188. ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
  189. ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
  190. return 0;
  191. }
  192. void ff_copy_picture(Picture *dst, Picture *src)
  193. {
  194. *dst = *src;
  195. dst->f.type = FF_BUFFER_TYPE_COPY;
  196. }
  197. /**
  198. * Release a frame buffer
  199. */
  200. static void free_frame_buffer(MpegEncContext *s, Picture *pic)
  201. {
  202. /* Windows Media Image codecs allocate internal buffers with different
  203. * dimensions; ignore user defined callbacks for these
  204. */
  205. if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
  206. ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
  207. else
  208. avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
  209. av_freep(&pic->f.hwaccel_picture_private);
  210. }
  211. /**
  212. * Allocate a frame buffer
  213. */
  214. static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
  215. {
  216. int r;
  217. if (s->avctx->hwaccel) {
  218. assert(!pic->f.hwaccel_picture_private);
  219. if (s->avctx->hwaccel->priv_data_size) {
  220. pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
  221. if (!pic->f.hwaccel_picture_private) {
  222. av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
  223. return -1;
  224. }
  225. }
  226. }
  227. if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
  228. r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
  229. else
  230. r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
  231. if (r < 0 || !pic->f.type || !pic->f.data[0]) {
  232. av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
  233. r, pic->f.type, pic->f.data[0]);
  234. av_freep(&pic->f.hwaccel_picture_private);
  235. return -1;
  236. }
  237. if (s->linesize && (s->linesize != pic->f.linesize[0] ||
  238. s->uvlinesize != pic->f.linesize[1])) {
  239. av_log(s->avctx, AV_LOG_ERROR,
  240. "get_buffer() failed (stride changed)\n");
  241. free_frame_buffer(s, pic);
  242. return -1;
  243. }
  244. if (pic->f.linesize[1] != pic->f.linesize[2]) {
  245. av_log(s->avctx, AV_LOG_ERROR,
  246. "get_buffer() failed (uv stride mismatch)\n");
  247. free_frame_buffer(s, pic);
  248. return -1;
  249. }
  250. return 0;
  251. }
  252. /**
  253. * Allocate a Picture.
  254. * The pixels are allocated/set by calling get_buffer() if shared = 0
  255. */
  256. int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
  257. {
  258. const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
  259. // the + 1 is needed so memset(,,stride*height) does not sig11
  260. const int mb_array_size = s->mb_stride * s->mb_height;
  261. const int b8_array_size = s->b8_stride * s->mb_height * 2;
  262. const int b4_array_size = s->b4_stride * s->mb_height * 4;
  263. int i;
  264. int r = -1;
  265. if (shared) {
  266. assert(pic->f.data[0]);
  267. assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
  268. pic->f.type = FF_BUFFER_TYPE_SHARED;
  269. } else {
  270. assert(!pic->f.data[0]);
  271. if (alloc_frame_buffer(s, pic) < 0)
  272. return -1;
  273. s->linesize = pic->f.linesize[0];
  274. s->uvlinesize = pic->f.linesize[1];
  275. }
  276. if (pic->f.qscale_table == NULL) {
  277. if (s->encoding) {
  278. FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
  279. mb_array_size * sizeof(int16_t), fail)
  280. FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
  281. mb_array_size * sizeof(int16_t), fail)
  282. FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
  283. mb_array_size * sizeof(int8_t ), fail)
  284. }
  285. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
  286. mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
  287. FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
  288. (big_mb_num + s->mb_stride) * sizeof(uint8_t),
  289. fail)
  290. FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
  291. (big_mb_num + s->mb_stride) * sizeof(uint32_t),
  292. fail)
  293. pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
  294. pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
  295. if (s->out_format == FMT_H264) {
  296. for (i = 0; i < 2; i++) {
  297. FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
  298. 2 * (b4_array_size + 4) * sizeof(int16_t),
  299. fail)
  300. pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
  301. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
  302. 4 * mb_array_size * sizeof(uint8_t), fail)
  303. }
  304. pic->f.motion_subsample_log2 = 2;
  305. } else if (s->out_format == FMT_H263 || s->encoding ||
  306. (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
  307. for (i = 0; i < 2; i++) {
  308. FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
  309. 2 * (b8_array_size + 4) * sizeof(int16_t),
  310. fail)
  311. pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
  312. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
  313. 4 * mb_array_size * sizeof(uint8_t), fail)
  314. }
  315. pic->f.motion_subsample_log2 = 3;
  316. }
  317. if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  318. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
  319. 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
  320. }
  321. pic->f.qstride = s->mb_stride;
  322. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
  323. 1 * sizeof(AVPanScan), fail)
  324. }
  325. pic->owner2 = s;
  326. return 0;
  327. fail: // for the FF_ALLOCZ_OR_GOTO macro
  328. if (r >= 0)
  329. free_frame_buffer(s, pic);
  330. return -1;
  331. }
  332. /**
  333. * Deallocate a picture.
  334. */
  335. static void free_picture(MpegEncContext *s, Picture *pic)
  336. {
  337. int i;
  338. if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
  339. free_frame_buffer(s, pic);
  340. }
  341. av_freep(&pic->mb_var);
  342. av_freep(&pic->mc_mb_var);
  343. av_freep(&pic->mb_mean);
  344. av_freep(&pic->f.mbskip_table);
  345. av_freep(&pic->qscale_table_base);
  346. av_freep(&pic->mb_type_base);
  347. av_freep(&pic->f.dct_coeff);
  348. av_freep(&pic->f.pan_scan);
  349. pic->f.mb_type = NULL;
  350. for (i = 0; i < 2; i++) {
  351. av_freep(&pic->motion_val_base[i]);
  352. av_freep(&pic->f.ref_index[i]);
  353. }
  354. if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
  355. for (i = 0; i < 4; i++) {
  356. pic->f.base[i] =
  357. pic->f.data[i] = NULL;
  358. }
  359. pic->f.type = 0;
  360. }
  361. }
  362. static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
  363. {
  364. int y_size = s->b8_stride * (2 * s->mb_height + 1);
  365. int c_size = s->mb_stride * (s->mb_height + 1);
  366. int yc_size = y_size + 2 * c_size;
  367. int i;
  368. // edge emu needs blocksize + filter length - 1
  369. // (= 17x17 for halfpel / 21x21 for h264)
  370. FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
  371. (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
  372. // FIXME should be linesize instead of s->width * 2
  373. // but that is not known before get_buffer()
  374. FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
  375. (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
  376. s->me.temp = s->me.scratchpad;
  377. s->rd_scratchpad = s->me.scratchpad;
  378. s->b_scratchpad = s->me.scratchpad;
  379. s->obmc_scratchpad = s->me.scratchpad + 16;
  380. if (s->encoding) {
  381. FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
  382. ME_MAP_SIZE * sizeof(uint32_t), fail)
  383. FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
  384. ME_MAP_SIZE * sizeof(uint32_t), fail)
  385. if (s->avctx->noise_reduction) {
  386. FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
  387. 2 * 64 * sizeof(int), fail)
  388. }
  389. }
  390. FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
  391. s->block = s->blocks[0];
  392. for (i = 0; i < 12; i++) {
  393. s->pblocks[i] = &s->block[i];
  394. }
  395. if (s->out_format == FMT_H263) {
  396. /* ac values */
  397. FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
  398. yc_size * sizeof(int16_t) * 16, fail);
  399. s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
  400. s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
  401. s->ac_val[2] = s->ac_val[1] + c_size;
  402. }
  403. return 0;
  404. fail:
  405. return -1; // free() through ff_MPV_common_end()
  406. }
  407. static void free_duplicate_context(MpegEncContext *s)
  408. {
  409. if (s == NULL)
  410. return;
  411. av_freep(&s->edge_emu_buffer);
  412. av_freep(&s->me.scratchpad);
  413. s->me.temp =
  414. s->rd_scratchpad =
  415. s->b_scratchpad =
  416. s->obmc_scratchpad = NULL;
  417. av_freep(&s->dct_error_sum);
  418. av_freep(&s->me.map);
  419. av_freep(&s->me.score_map);
  420. av_freep(&s->blocks);
  421. av_freep(&s->ac_val_base);
  422. s->block = NULL;
  423. }
  424. static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
  425. {
  426. #define COPY(a) bak->a = src->a
  427. COPY(edge_emu_buffer);
  428. COPY(me.scratchpad);
  429. COPY(me.temp);
  430. COPY(rd_scratchpad);
  431. COPY(b_scratchpad);
  432. COPY(obmc_scratchpad);
  433. COPY(me.map);
  434. COPY(me.score_map);
  435. COPY(blocks);
  436. COPY(block);
  437. COPY(start_mb_y);
  438. COPY(end_mb_y);
  439. COPY(me.map_generation);
  440. COPY(pb);
  441. COPY(dct_error_sum);
  442. COPY(dct_count[0]);
  443. COPY(dct_count[1]);
  444. COPY(ac_val_base);
  445. COPY(ac_val[0]);
  446. COPY(ac_val[1]);
  447. COPY(ac_val[2]);
  448. #undef COPY
  449. }
  450. void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
  451. {
  452. MpegEncContext bak;
  453. int i;
  454. // FIXME copy only needed parts
  455. // START_TIMER
  456. backup_duplicate_context(&bak, dst);
  457. memcpy(dst, src, sizeof(MpegEncContext));
  458. backup_duplicate_context(dst, &bak);
  459. for (i = 0; i < 12; i++) {
  460. dst->pblocks[i] = &dst->block[i];
  461. }
  462. // STOP_TIMER("update_duplicate_context")
  463. // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
  464. }
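/*
 * Calling-pattern sketch (an assumption based on the slice-threading code,
 * not a quote from this file): before the per-slice contexts run, each one is
 * refreshed from the main context, so only the fields listed in
 * backup_duplicate_context() stay thread-local across the memcpy() above.
 *
 *     for (i = 1; i < s->slice_context_count; i++)
 *         ff_update_duplicate_context(s->thread_context[i], s);
 */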
  465. int ff_mpeg_update_thread_context(AVCodecContext *dst,
  466. const AVCodecContext *src)
  467. {
  468. MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
  469. if (dst == src || !s1->context_initialized)
  470. return 0;
  471. // FIXME can parameters change on I-frames?
  472. // in that case dst may need a reinit
  473. if (!s->context_initialized) {
  474. memcpy(s, s1, sizeof(MpegEncContext));
  475. s->avctx = dst;
  476. s->picture_range_start += MAX_PICTURE_COUNT;
  477. s->picture_range_end += MAX_PICTURE_COUNT;
  478. s->bitstream_buffer = NULL;
  479. s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
  480. ff_MPV_common_init(s);
  481. }
  482. s->avctx->coded_height = s1->avctx->coded_height;
  483. s->avctx->coded_width = s1->avctx->coded_width;
  484. s->avctx->width = s1->avctx->width;
  485. s->avctx->height = s1->avctx->height;
  486. s->coded_picture_number = s1->coded_picture_number;
  487. s->picture_number = s1->picture_number;
  488. s->input_picture_number = s1->input_picture_number;
  489. memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
  490. memcpy(&s->last_picture, &s1->last_picture,
  491. (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
  492. s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
  493. s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
  494. s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
  495. // Error/bug resilience
  496. s->next_p_frame_damaged = s1->next_p_frame_damaged;
  497. s->workaround_bugs = s1->workaround_bugs;
  498. // MPEG4 timing info
  499. memcpy(&s->time_increment_bits, &s1->time_increment_bits,
  500. (char *) &s1->shape - (char *) &s1->time_increment_bits);
  501. // B-frame info
  502. s->max_b_frames = s1->max_b_frames;
  503. s->low_delay = s1->low_delay;
  504. s->dropable = s1->dropable;
  505. // DivX handling (doesn't work)
  506. s->divx_packed = s1->divx_packed;
  507. if (s1->bitstream_buffer) {
  508. if (s1->bitstream_buffer_size +
  509. FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
  510. av_fast_malloc(&s->bitstream_buffer,
  511. &s->allocated_bitstream_buffer_size,
  512. s1->allocated_bitstream_buffer_size);
  513. s->bitstream_buffer_size = s1->bitstream_buffer_size;
  514. memcpy(s->bitstream_buffer, s1->bitstream_buffer,
  515. s1->bitstream_buffer_size);
  516. memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
  517. FF_INPUT_BUFFER_PADDING_SIZE);
  518. }
  519. // MPEG2/interlacing info
  520. memcpy(&s->progressive_sequence, &s1->progressive_sequence,
  521. (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
  522. if (!s1->first_field) {
  523. s->last_pict_type = s1->pict_type;
  524. if (s1->current_picture_ptr)
  525. s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
  526. if (s1->pict_type != AV_PICTURE_TYPE_B) {
  527. s->last_non_b_pict_type = s1->pict_type;
  528. }
  529. }
  530. return 0;
  531. }
  532. /**
  533. * Set the given MpegEncContext to common defaults
  534. * (same for encoding and decoding).
  535. * The changed fields will not depend upon the
  536. * prior state of the MpegEncContext.
  537. */
  538. void ff_MPV_common_defaults(MpegEncContext *s)
  539. {
  540. s->y_dc_scale_table =
  541. s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
  542. s->chroma_qscale_table = ff_default_chroma_qscale_table;
  543. s->progressive_frame = 1;
  544. s->progressive_sequence = 1;
  545. s->picture_structure = PICT_FRAME;
  546. s->coded_picture_number = 0;
  547. s->picture_number = 0;
  548. s->input_picture_number = 0;
  549. s->picture_in_gop_number = 0;
  550. s->f_code = 1;
  551. s->b_code = 1;
  552. s->picture_range_start = 0;
  553. s->picture_range_end = MAX_PICTURE_COUNT;
  554. s->slice_context_count = 1;
  555. }
  556. /**
  557. * Set the given MpegEncContext to defaults for decoding.
  558. * the changed fields will not depend upon
  559. * the prior state of the MpegEncContext.
  560. */
  561. void ff_MPV_decode_defaults(MpegEncContext *s)
  562. {
  563. ff_MPV_common_defaults(s);
  564. }
  565. /**
  566. * init common structure for both encoder and decoder.
  567. * this assumes that some variables like width/height are already set
  568. */
  569. av_cold int ff_MPV_common_init(MpegEncContext *s)
  570. {
  571. int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
  572. int nb_slices = (HAVE_THREADS &&
  573. s->avctx->active_thread_type & FF_THREAD_SLICE) ?
  574. s->avctx->thread_count : 1;
  575. if (s->encoding && s->avctx->slices)
  576. nb_slices = s->avctx->slices;
  577. if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
  578. s->mb_height = (s->height + 31) / 32 * 2;
  579. else if (s->codec_id != CODEC_ID_H264)
  580. s->mb_height = (s->height + 15) / 16;
  581. if (s->avctx->pix_fmt == PIX_FMT_NONE) {
  582. av_log(s->avctx, AV_LOG_ERROR,
  583. "decoding to PIX_FMT_NONE is not supported.\n");
  584. return -1;
  585. }
  586. if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
  587. int max_slices;
  588. if (s->mb_height)
  589. max_slices = FFMIN(MAX_THREADS, s->mb_height);
  590. else
  591. max_slices = MAX_THREADS;
  592. av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
  593. " reducing to %d\n", nb_slices, max_slices);
  594. nb_slices = max_slices;
  595. }
  596. if ((s->width || s->height) &&
  597. av_image_check_size(s->width, s->height, 0, s->avctx))
  598. return -1;
  599. ff_dct_common_init(s);
  600. s->flags = s->avctx->flags;
  601. s->flags2 = s->avctx->flags2;
  602. if (s->width && s->height) {
  603. s->mb_width = (s->width + 15) / 16;
  604. s->mb_stride = s->mb_width + 1;
  605. s->b8_stride = s->mb_width * 2 + 1;
  606. s->b4_stride = s->mb_width * 4 + 1;
  607. mb_array_size = s->mb_height * s->mb_stride;
  608. mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
  609. /* set chroma shifts */
  610. avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
  611. &s->chroma_y_shift);
  612. /* set default edge pos, will be overridden
  613. * in decode_header if needed */
  614. s->h_edge_pos = s->mb_width * 16;
  615. s->v_edge_pos = s->mb_height * 16;
  616. s->mb_num = s->mb_width * s->mb_height;
  617. s->block_wrap[0] =
  618. s->block_wrap[1] =
  619. s->block_wrap[2] =
  620. s->block_wrap[3] = s->b8_stride;
  621. s->block_wrap[4] =
  622. s->block_wrap[5] = s->mb_stride;
  623. y_size = s->b8_stride * (2 * s->mb_height + 1);
  624. c_size = s->mb_stride * (s->mb_height + 1);
  625. yc_size = y_size + 2 * c_size;
  626. /* convert fourcc to upper case */
  627. s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
  628. s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
  629. s->avctx->coded_frame = (AVFrame *)&s->current_picture;
  630. FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
  631. fail); // error resilience code looks cleaner with this
  632. for (y = 0; y < s->mb_height; y++)
  633. for (x = 0; x < s->mb_width; x++)
  634. s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
  635. s->mb_index2xy[s->mb_height * s->mb_width] =
  636. (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
  637. if (s->encoding) {
  638. /* Allocate MV tables */
  639. FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
  640. mv_table_size * 2 * sizeof(int16_t), fail);
  641. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
  642. mv_table_size * 2 * sizeof(int16_t), fail);
  643. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
  644. mv_table_size * 2 * sizeof(int16_t), fail);
  645. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
  646. mv_table_size * 2 * sizeof(int16_t), fail);
  647. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
  648. mv_table_size * 2 * sizeof(int16_t), fail);
  649. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
  650. mv_table_size * 2 * sizeof(int16_t), fail);
  651. s->p_mv_table = s->p_mv_table_base +
  652. s->mb_stride + 1;
  653. s->b_forw_mv_table = s->b_forw_mv_table_base +
  654. s->mb_stride + 1;
  655. s->b_back_mv_table = s->b_back_mv_table_base +
  656. s->mb_stride + 1;
  657. s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
  658. s->mb_stride + 1;
  659. s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
  660. s->mb_stride + 1;
  661. s->b_direct_mv_table = s->b_direct_mv_table_base +
  662. s->mb_stride + 1;
  663. if (s->msmpeg4_version) {
  664. FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
  665. 2 * 2 * (MAX_LEVEL + 1) *
  666. (MAX_RUN + 1) * 2 * sizeof(int), fail);
  667. }
  668. FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
  669. /* Allocate MB type table */
  670. FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
  671. sizeof(uint16_t), fail); // needed for encoding
  672. FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
  673. sizeof(int), fail);
  674. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
  675. 64 * 32 * sizeof(int), fail);
  676. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
  677. 64 * 32 * sizeof(int), fail);
  678. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
  679. 64 * 32 * 2 * sizeof(uint16_t), fail);
  680. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
  681. 64 * 32 * 2 * sizeof(uint16_t), fail);
  682. FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
  683. MAX_PICTURE_COUNT * sizeof(Picture *), fail);
  684. FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
  685. MAX_PICTURE_COUNT * sizeof(Picture *), fail);
  686. if (s->avctx->noise_reduction) {
  687. FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
  688. 2 * 64 * sizeof(uint16_t), fail);
  689. }
  690. }
  691. }
  692. s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
  693. FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
  694. s->picture_count * sizeof(Picture), fail);
  695. for (i = 0; i < s->picture_count; i++) {
  696. avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
  697. }
  698. if (s->width && s->height) {
  699. FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
  700. mb_array_size * sizeof(uint8_t), fail);
  701. if (s->codec_id == CODEC_ID_MPEG4 ||
  702. (s->flags & CODEC_FLAG_INTERLACED_ME)) {
  703. /* interlaced direct mode decoding tables */
  704. for (i = 0; i < 2; i++) {
  705. int j, k;
  706. for (j = 0; j < 2; j++) {
  707. for (k = 0; k < 2; k++) {
  708. FF_ALLOCZ_OR_GOTO(s->avctx,
  709. s->b_field_mv_table_base[i][j][k],
  710. mv_table_size * 2 * sizeof(int16_t),
  711. fail);
  712. s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
  713. s->mb_stride + 1;
  714. }
  715. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
  716. mb_array_size * 2 * sizeof(uint8_t),
  717. fail);
  718. FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
  719. mv_table_size * 2 * sizeof(int16_t),
  720. fail);
  721. s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
  722. + s->mb_stride + 1;
  723. }
  724. FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
  725. mb_array_size * 2 * sizeof(uint8_t),
  726. fail);
  727. }
  728. }
  729. if (s->out_format == FMT_H263) {
  730. /* cbp values */
  731. FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
  732. s->coded_block = s->coded_block_base + s->b8_stride + 1;
  733. /* cbp, ac_pred, pred_dir */
  734. FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
  735. mb_array_size * sizeof(uint8_t), fail);
  736. FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
  737. mb_array_size * sizeof(uint8_t), fail);
  738. }
  739. if (s->h263_pred || s->h263_plus || !s->encoding) {
  740. /* dc values */
  741. // MN: we need these for error resilience of intra-frames
  742. FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
  743. yc_size * sizeof(int16_t), fail);
  744. s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
  745. s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
  746. s->dc_val[2] = s->dc_val[1] + c_size;
  747. for (i = 0; i < yc_size; i++)
  748. s->dc_val_base[i] = 1024;
  749. }
  750. /* which mb is an intra block */
  751. FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
  752. memset(s->mbintra_table, 1, mb_array_size);
  753. /* init macroblock skip table */
  754. FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
  755. // Note the + 1 is for a quicker mpeg4 slice_end detection
  756. s->parse_context.state = -1;
  757. if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
  758. s->avctx->debug_mv) {
  759. s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
  760. 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
  761. s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
  762. 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
  763. s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
  764. 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
  765. }
  766. }
  767. s->context_initialized = 1;
  768. s->thread_context[0] = s;
  769. if (s->width && s->height) {
  770. if (nb_slices > 1) {
  771. for (i = 1; i < nb_slices; i++) {
  772. s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
  773. memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
  774. }
  775. for (i = 0; i < nb_slices; i++) {
  776. if (init_duplicate_context(s->thread_context[i], s) < 0)
  777. goto fail;
  778. s->thread_context[i]->start_mb_y =
  779. (s->mb_height * (i) + nb_slices / 2) / nb_slices;
  780. s->thread_context[i]->end_mb_y =
  781. (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
  782. }
  783. } else {
  784. if (init_duplicate_context(s, s) < 0)
  785. goto fail;
  786. s->start_mb_y = 0;
  787. s->end_mb_y = s->mb_height;
  788. }
  789. s->slice_context_count = nb_slices;
  790. }
  791. return 0;
  792. fail:
  793. ff_MPV_common_end(s);
  794. return -1;
  795. }
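/*
 * Lifecycle sketch (illustrative assumption about a typical decoder, not a
 * quote from this file): the caller sets at least width, height and codec_id
 * before this init and pairs it with ff_MPV_common_end() on teardown.
 *
 *     ff_MPV_decode_defaults(s);
 *     s->width  = avctx->coded_width;
 *     s->height = avctx->coded_height;
 *     if (ff_MPV_common_init(s) < 0)
 *         return -1;
 *     ...
 *     ff_MPV_common_end(s);
 */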
  796. /* init common structure for both encoder and decoder */
  797. void ff_MPV_common_end(MpegEncContext *s)
  798. {
  799. int i, j, k;
  800. if (s->slice_context_count > 1) {
  801. for (i = 0; i < s->slice_context_count; i++) {
  802. free_duplicate_context(s->thread_context[i]);
  803. }
  804. for (i = 1; i < s->slice_context_count; i++) {
  805. av_freep(&s->thread_context[i]);
  806. }
  807. s->slice_context_count = 1;
  808. } else free_duplicate_context(s);
  809. av_freep(&s->parse_context.buffer);
  810. s->parse_context.buffer_size = 0;
  811. av_freep(&s->mb_type);
  812. av_freep(&s->p_mv_table_base);
  813. av_freep(&s->b_forw_mv_table_base);
  814. av_freep(&s->b_back_mv_table_base);
  815. av_freep(&s->b_bidir_forw_mv_table_base);
  816. av_freep(&s->b_bidir_back_mv_table_base);
  817. av_freep(&s->b_direct_mv_table_base);
  818. s->p_mv_table = NULL;
  819. s->b_forw_mv_table = NULL;
  820. s->b_back_mv_table = NULL;
  821. s->b_bidir_forw_mv_table = NULL;
  822. s->b_bidir_back_mv_table = NULL;
  823. s->b_direct_mv_table = NULL;
  824. for (i = 0; i < 2; i++) {
  825. for (j = 0; j < 2; j++) {
  826. for (k = 0; k < 2; k++) {
  827. av_freep(&s->b_field_mv_table_base[i][j][k]);
  828. s->b_field_mv_table[i][j][k] = NULL;
  829. }
  830. av_freep(&s->b_field_select_table[i][j]);
  831. av_freep(&s->p_field_mv_table_base[i][j]);
  832. s->p_field_mv_table[i][j] = NULL;
  833. }
  834. av_freep(&s->p_field_select_table[i]);
  835. }
  836. av_freep(&s->dc_val_base);
  837. av_freep(&s->coded_block_base);
  838. av_freep(&s->mbintra_table);
  839. av_freep(&s->cbp_table);
  840. av_freep(&s->pred_dir_table);
  841. av_freep(&s->mbskip_table);
  842. av_freep(&s->bitstream_buffer);
  843. s->allocated_bitstream_buffer_size = 0;
  844. av_freep(&s->avctx->stats_out);
  845. av_freep(&s->ac_stats);
  846. av_freep(&s->error_status_table);
  847. av_freep(&s->mb_index2xy);
  848. av_freep(&s->lambda_table);
  849. av_freep(&s->q_intra_matrix);
  850. av_freep(&s->q_inter_matrix);
  851. av_freep(&s->q_intra_matrix16);
  852. av_freep(&s->q_inter_matrix16);
  853. av_freep(&s->input_picture);
  854. av_freep(&s->reordered_input_picture);
  855. av_freep(&s->dct_offset);
  856. if (s->picture && !s->avctx->internal->is_copy) {
  857. for (i = 0; i < s->picture_count; i++) {
  858. free_picture(s, &s->picture[i]);
  859. }
  860. }
  861. av_freep(&s->picture);
  862. s->context_initialized = 0;
  863. s->last_picture_ptr =
  864. s->next_picture_ptr =
  865. s->current_picture_ptr = NULL;
  866. s->linesize = s->uvlinesize = 0;
  867. for (i = 0; i < 3; i++)
  868. av_freep(&s->visualization_buffer[i]);
  869. if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
  870. avcodec_default_free_buffers(s->avctx);
  871. }
  872. void ff_init_rl(RLTable *rl,
  873. uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
  874. {
  875. int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
  876. uint8_t index_run[MAX_RUN + 1];
  877. int last, run, level, start, end, i;
  878. /* If table is static, we can quit if rl->max_level[0] is not NULL */
  879. if (static_store && rl->max_level[0])
  880. return;
  881. /* compute max_level[], max_run[] and index_run[] */
  882. for (last = 0; last < 2; last++) {
  883. if (last == 0) {
  884. start = 0;
  885. end = rl->last;
  886. } else {
  887. start = rl->last;
  888. end = rl->n;
  889. }
  890. memset(max_level, 0, MAX_RUN + 1);
  891. memset(max_run, 0, MAX_LEVEL + 1);
  892. memset(index_run, rl->n, MAX_RUN + 1);
  893. for (i = start; i < end; i++) {
  894. run = rl->table_run[i];
  895. level = rl->table_level[i];
  896. if (index_run[run] == rl->n)
  897. index_run[run] = i;
  898. if (level > max_level[run])
  899. max_level[run] = level;
  900. if (run > max_run[level])
  901. max_run[level] = run;
  902. }
  903. if (static_store)
  904. rl->max_level[last] = static_store[last];
  905. else
  906. rl->max_level[last] = av_malloc(MAX_RUN + 1);
  907. memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
  908. if (static_store)
  909. rl->max_run[last] = static_store[last] + MAX_RUN + 1;
  910. else
  911. rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
  912. memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
  913. if (static_store)
  914. rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
  915. else
  916. rl->index_run[last] = av_malloc(MAX_RUN + 1);
  917. memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
  918. }
  919. }
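/*
 * Layout note (derived from the code above, added for clarity): when
 * static_store is used, each of its two rows is packed as
 *
 *     static_store[last][0 .. MAX_RUN]                       max_level[]
 *     static_store[last][MAX_RUN+1 .. MAX_RUN+MAX_LEVEL+1]   max_run[]
 *     static_store[last][MAX_RUN+MAX_LEVEL+2 .. end]         index_run[]
 *
 * which is why the rows are declared 2 * MAX_RUN + MAX_LEVEL + 3 bytes wide.
 */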
  920. void ff_init_vlc_rl(RLTable *rl)
  921. {
  922. int i, q;
  923. for (q = 0; q < 32; q++) {
  924. int qmul = q * 2;
  925. int qadd = (q - 1) | 1;
  926. if (q == 0) {
  927. qmul = 1;
  928. qadd = 0;
  929. }
  930. for (i = 0; i < rl->vlc.table_size; i++) {
  931. int code = rl->vlc.table[i][0];
  932. int len = rl->vlc.table[i][1];
  933. int level, run;
  934. if (len == 0) { // illegal code
  935. run = 66;
  936. level = MAX_LEVEL;
  937. } else if (len < 0) { // more bits needed
  938. run = 0;
  939. level = code;
  940. } else {
  941. if (code == rl->n) { // esc
  942. run = 66;
  943. level = 0;
  944. } else {
  945. run = rl->table_run[code] + 1;
  946. level = rl->table_level[code] * qmul + qadd;
  947. if (code >= rl->last) run += 192;
  948. }
  949. }
  950. rl->rl_vlc[q][i].len = len;
  951. rl->rl_vlc[q][i].level = level;
  952. rl->rl_vlc[q][i].run = run;
  953. }
  954. }
  955. }
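/*
 * Worked example (added for illustration): for quantizer q = 5 the table is
 * built with qmul = 10 and qadd = (5 - 1) | 1 = 5, so a stored level of 3
 * dequantizes to 3 * 10 + 5 = 35; the q = 0 special case (qmul = 1, qadd = 0)
 * leaves levels unchanged.
 */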
  956. void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
  957. {
  958. int i;
  959. /* release non reference frames */
  960. for (i = 0; i < s->picture_count; i++) {
  961. if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
  962. (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
  963. (remove_current || &s->picture[i] != s->current_picture_ptr)
  964. /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
  965. free_frame_buffer(s, &s->picture[i]);
  966. }
  967. }
  968. }
  969. int ff_find_unused_picture(MpegEncContext *s, int shared)
  970. {
  971. int i;
  972. if (shared) {
  973. for (i = s->picture_range_start; i < s->picture_range_end; i++) {
  974. if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
  975. return i;
  976. }
  977. } else {
  978. for (i = s->picture_range_start; i < s->picture_range_end; i++) {
  979. if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
  980. return i; // FIXME
  981. }
  982. for (i = s->picture_range_start; i < s->picture_range_end; i++) {
  983. if (s->picture[i].f.data[0] == NULL)
  984. return i;
  985. }
  986. }
  987. return AVERROR_INVALIDDATA;
  988. }
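/*
 * Usage sketch (illustrative, with error handling added; the callers below
 * use the returned index directly):
 *
 *     int idx = ff_find_unused_picture(s, 0);
 *     if (idx < 0)
 *         return idx;              // AVERROR_INVALIDDATA: no free slot
 *     pic = &s->picture[idx];
 */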
  989. static void update_noise_reduction(MpegEncContext *s)
  990. {
  991. int intra, i;
  992. for (intra = 0; intra < 2; intra++) {
  993. if (s->dct_count[intra] > (1 << 16)) {
  994. for (i = 0; i < 64; i++) {
  995. s->dct_error_sum[intra][i] >>= 1;
  996. }
  997. s->dct_count[intra] >>= 1;
  998. }
  999. for (i = 0; i < 64; i++) {
  1000. s->dct_offset[intra][i] = (s->avctx->noise_reduction *
  1001. s->dct_count[intra] +
  1002. s->dct_error_sum[intra][i] / 2) /
  1003. (s->dct_error_sum[intra][i] + 1);
  1004. }
  1005. }
  1006. }
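/*
 * Clarifying note (derived from the loop above): each offset converges to a
 * rounded version of
 *
 *     dct_offset[intra][i] ~= noise_reduction * dct_count[intra]
 *                             / dct_error_sum[intra][i]
 *
 * so coefficients with a large accumulated error relative to the number of
 * blocks seen get a smaller offset; the halving once dct_count exceeds 2^16
 * keeps both accumulators within integer range.
 */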
  1007. /**
  1008. * generic function for encode/decode called after coding/decoding
  1009. * the header and before a frame is coded/decoded.
  1010. */
  1011. int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
  1012. {
  1013. int i;
  1014. Picture *pic;
  1015. s->mb_skipped = 0;
  1016. assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
  1017. s->codec_id == CODEC_ID_SVQ3);
  1018. /* mark & release old frames */
  1019. if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
  1020. if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
  1021. s->last_picture_ptr != s->next_picture_ptr &&
  1022. s->last_picture_ptr->f.data[0]) {
  1023. if (s->last_picture_ptr->owner2 == s)
  1024. free_frame_buffer(s, s->last_picture_ptr);
  1025. }
  1026. /* release forgotten pictures */
  1027. /* if (mpeg124/h263) */
  1028. if (!s->encoding) {
  1029. for (i = 0; i < s->picture_count; i++) {
  1030. if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
  1031. &s->picture[i] != s->last_picture_ptr &&
  1032. &s->picture[i] != s->next_picture_ptr &&
  1033. s->picture[i].f.reference) {
  1034. if (!(avctx->active_thread_type & FF_THREAD_FRAME))
  1035. av_log(avctx, AV_LOG_ERROR,
  1036. "releasing zombie picture\n");
  1037. free_frame_buffer(s, &s->picture[i]);
  1038. }
  1039. }
  1040. }
  1041. }
  1042. if (!s->encoding) {
  1043. ff_release_unused_pictures(s, 1);
  1044. if (s->current_picture_ptr &&
  1045. s->current_picture_ptr->f.data[0] == NULL) {
  1046. // we already have an unused image
  1047. // (maybe it was set before reading the header)
  1048. pic = s->current_picture_ptr;
  1049. } else {
  1050. i = ff_find_unused_picture(s, 0);
  1051. pic = &s->picture[i];
  1052. }
  1053. pic->f.reference = 0;
  1054. if (!s->dropable) {
  1055. if (s->codec_id == CODEC_ID_H264)
  1056. pic->f.reference = s->picture_structure;
  1057. else if (s->pict_type != AV_PICTURE_TYPE_B)
  1058. pic->f.reference = 3;
  1059. }
  1060. pic->f.coded_picture_number = s->coded_picture_number++;
  1061. if (ff_alloc_picture(s, pic, 0) < 0)
  1062. return -1;
  1063. s->current_picture_ptr = pic;
  1064. // FIXME use only the vars from current_pic
  1065. s->current_picture_ptr->f.top_field_first = s->top_field_first;
  1066. if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
  1067. s->codec_id == CODEC_ID_MPEG2VIDEO) {
  1068. if (s->picture_structure != PICT_FRAME)
  1069. s->current_picture_ptr->f.top_field_first =
  1070. (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
  1071. }
  1072. s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
  1073. !s->progressive_sequence;
  1074. s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
  1075. }
  1076. s->current_picture_ptr->f.pict_type = s->pict_type;
  1077. // if (s->flags && CODEC_FLAG_QSCALE)
  1078. // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
  1079. s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
  1080. ff_copy_picture(&s->current_picture, s->current_picture_ptr);
  1081. if (s->pict_type != AV_PICTURE_TYPE_B) {
  1082. s->last_picture_ptr = s->next_picture_ptr;
  1083. if (!s->dropable)
  1084. s->next_picture_ptr = s->current_picture_ptr;
  1085. }
  1086. /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
  1087. s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
  1088. s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
  1089. s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
  1090. s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
  1091. s->pict_type, s->dropable); */
  1092. if (s->codec_id != CODEC_ID_H264) {
  1093. if ((s->last_picture_ptr == NULL ||
  1094. s->last_picture_ptr->f.data[0] == NULL) &&
  1095. (s->pict_type != AV_PICTURE_TYPE_I ||
  1096. s->picture_structure != PICT_FRAME)) {
  1097. if (s->pict_type != AV_PICTURE_TYPE_I)
  1098. av_log(avctx, AV_LOG_ERROR,
  1099. "warning: first frame is no keyframe\n");
  1100. else if (s->picture_structure != PICT_FRAME)
  1101. av_log(avctx, AV_LOG_INFO,
  1102. "allocate dummy last picture for field based first keyframe\n");
  1103. /* Allocate a dummy frame */
  1104. i = ff_find_unused_picture(s, 0);
  1105. s->last_picture_ptr = &s->picture[i];
  1106. if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
  1107. return -1;
  1108. ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
  1109. INT_MAX, 0);
  1110. ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
  1111. INT_MAX, 1);
  1112. }
  1113. if ((s->next_picture_ptr == NULL ||
  1114. s->next_picture_ptr->f.data[0] == NULL) &&
  1115. s->pict_type == AV_PICTURE_TYPE_B) {
  1116. /* Allocate a dummy frame */
  1117. i = ff_find_unused_picture(s, 0);
  1118. s->next_picture_ptr = &s->picture[i];
  1119. if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
  1120. return -1;
  1121. ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
  1122. INT_MAX, 0);
  1123. ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
  1124. INT_MAX, 1);
  1125. }
  1126. }
  1127. if (s->last_picture_ptr)
  1128. ff_copy_picture(&s->last_picture, s->last_picture_ptr);
  1129. if (s->next_picture_ptr)
  1130. ff_copy_picture(&s->next_picture, s->next_picture_ptr);
  1131. if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
  1132. (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
  1133. if (s->next_picture_ptr)
  1134. s->next_picture_ptr->owner2 = s;
  1135. if (s->last_picture_ptr)
  1136. s->last_picture_ptr->owner2 = s;
  1137. }
  1138. assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
  1139. s->last_picture_ptr->f.data[0]));
  1140. if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
  1141. int i;
  1142. for (i = 0; i < 4; i++) {
  1143. if (s->picture_structure == PICT_BOTTOM_FIELD) {
  1144. s->current_picture.f.data[i] +=
  1145. s->current_picture.f.linesize[i];
  1146. }
  1147. s->current_picture.f.linesize[i] *= 2;
  1148. s->last_picture.f.linesize[i] *= 2;
  1149. s->next_picture.f.linesize[i] *= 2;
  1150. }
  1151. }
  1152. s->err_recognition = avctx->err_recognition;
  1153. /* set dequantizer, we can't do it during init as
  1154. * it might change for mpeg4 and we can't do it in the header
  1155. * decode as init is not called for mpeg4 there yet */
  1156. if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
  1157. s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
  1158. s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
  1159. } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
  1160. s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
  1161. s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
  1162. } else {
  1163. s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
  1164. s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
  1165. }
  1166. if (s->dct_error_sum) {
  1167. assert(s->avctx->noise_reduction && s->encoding);
  1168. update_noise_reduction(s);
  1169. }
  1170. if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
  1171. return ff_xvmc_field_start(s, avctx);
  1172. return 0;
  1173. }
  1174. /* generic function for encode/decode called after a
  1175. * frame has been coded/decoded. */
  1176. void ff_MPV_frame_end(MpegEncContext *s)
  1177. {
  1178. int i;
  1179. /* redraw edges for the frame if decoding didn't complete */
  1180. // just to make sure that all data is rendered.
  1181. if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
  1182. ff_xvmc_field_end(s);
  1183. } else if ((s->error_count || s->encoding) &&
  1184. !s->avctx->hwaccel &&
  1185. !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
  1186. s->unrestricted_mv &&
  1187. s->current_picture.f.reference &&
  1188. !s->intra_only &&
  1189. !(s->flags & CODEC_FLAG_EMU_EDGE)) {
  1190. int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
  1191. int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
  1192. s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
  1193. s->h_edge_pos, s->v_edge_pos,
  1194. EDGE_WIDTH, EDGE_WIDTH,
  1195. EDGE_TOP | EDGE_BOTTOM);
  1196. s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
  1197. s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
  1198. EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
  1199. EDGE_TOP | EDGE_BOTTOM);
  1200. s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
  1201. s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
  1202. EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
  1203. EDGE_TOP | EDGE_BOTTOM);
  1204. }
  1205. emms_c();
  1206. s->last_pict_type = s->pict_type;
  1207. s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
  1208. if (s->pict_type!= AV_PICTURE_TYPE_B) {
  1209. s->last_non_b_pict_type = s->pict_type;
  1210. }
  1211. #if 0
  1212. /* copy back current_picture variables */
  1213. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1214. if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
  1215. s->picture[i] = s->current_picture;
  1216. break;
  1217. }
  1218. }
  1219. assert(i < MAX_PICTURE_COUNT);
  1220. #endif
  1221. if (s->encoding) {
  1222. /* release non-reference frames */
  1223. for (i = 0; i < s->picture_count; i++) {
  1224. if (s->picture[i].f.data[0] && !s->picture[i].f.reference
  1225. /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
  1226. free_frame_buffer(s, &s->picture[i]);
  1227. }
  1228. }
  1229. }
  1230. // clear copies, to avoid confusion
  1231. #if 0
  1232. memset(&s->last_picture, 0, sizeof(Picture));
  1233. memset(&s->next_picture, 0, sizeof(Picture));
  1234. memset(&s->current_picture, 0, sizeof(Picture));
  1235. #endif
  1236. s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
  1237. if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
  1238. ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
  1239. s->mb_height - 1, 0);
  1240. }
  1241. }
  1242. /**
  1243. * Draw a line from (ex, ey) -> (sx, sy).
  1244. * @param w width of the image
  1245. * @param h height of the image
  1246. * @param stride stride/linesize of the image
  1247. * @param color color of the line
  1248. */
  1249. static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
  1250. int w, int h, int stride, int color)
  1251. {
  1252. int x, y, fr, f;
  1253. sx = av_clip(sx, 0, w - 1);
  1254. sy = av_clip(sy, 0, h - 1);
  1255. ex = av_clip(ex, 0, w - 1);
  1256. ey = av_clip(ey, 0, h - 1);
  1257. buf[sy * stride + sx] += color;
  1258. if (FFABS(ex - sx) > FFABS(ey - sy)) {
  1259. if (sx > ex) {
  1260. FFSWAP(int, sx, ex);
  1261. FFSWAP(int, sy, ey);
  1262. }
  1263. buf += sx + sy * stride;
  1264. ex -= sx;
  1265. f = ((ey - sy) << 16) / ex;
  1266. for (x = 0; x <= ex; x++) {
  1267. y = (x * f) >> 16;
  1268. fr = (x * f) & 0xFFFF;
  1269. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1270. buf[(y + 1) * stride + x] += (color * fr ) >> 16;
  1271. }
  1272. } else {
  1273. if (sy > ey) {
  1274. FFSWAP(int, sx, ex);
  1275. FFSWAP(int, sy, ey);
  1276. }
  1277. buf += sx + sy * stride;
  1278. ey -= sy;
  1279. if (ey)
  1280. f = ((ex - sx) << 16) / ey;
  1281. else
  1282. f = 0;
  1283. for (y = 0; y <= ey; y++) {
  1284. x = (y * f) >> 16;
  1285. fr = (y * f) & 0xFFFF;
  1286. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1287. buf[y * stride + x + 1] += (color * fr ) >> 16;
  1288. }
  1289. }
  1290. }
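/*
 * Fixed-point note (added for clarity): the slope f is kept in 16.16 format.
 * For a segment from (0, 0) to (4, 1) stepped along x, f = (1 << 16) / 4 =
 * 0x4000; at x = 2 the fraction fr = 0x8000, so half of the color is added
 * on row y = 0 and half on row y = 1, giving a simple anti-aliased line.
 */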
  1291. /**
  1292. * Draw an arrow from (ex, ey) -> (sx, sy).
  1293. * @param w width of the image
  1294. * @param h height of the image
  1295. * @param stride stride/linesize of the image
  1296. * @param color color of the arrow
  1297. */
  1298. static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
  1299. int ey, int w, int h, int stride, int color)
  1300. {
  1301. int dx,dy;
  1302. sx = av_clip(sx, -100, w + 100);
  1303. sy = av_clip(sy, -100, h + 100);
  1304. ex = av_clip(ex, -100, w + 100);
  1305. ey = av_clip(ey, -100, h + 100);
  1306. dx = ex - sx;
  1307. dy = ey - sy;
  1308. if (dx * dx + dy * dy > 3 * 3) {
  1309. int rx = dx + dy;
  1310. int ry = -dx + dy;
  1311. int length = ff_sqrt((rx * rx + ry * ry) << 8);
  1312. // FIXME subpixel accuracy
  1313. rx = ROUNDED_DIV(rx * 3 << 4, length);
  1314. ry = ROUNDED_DIV(ry * 3 << 4, length);
  1315. draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
  1316. draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
  1317. }
  1318. draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
  1319. }
  1320. /**
  1321. * Print debugging info for the given picture.
  1322. */
  1323. void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
  1324. {
  1325. if (s->avctx->hwaccel || !pict || !pict->mb_type)
  1326. return;
  1327. if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
  1328. int x,y;
  1329. av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
  1330. switch (pict->pict_type) {
  1331. case AV_PICTURE_TYPE_I:
  1332. av_log(s->avctx,AV_LOG_DEBUG,"I\n");
  1333. break;
  1334. case AV_PICTURE_TYPE_P:
  1335. av_log(s->avctx,AV_LOG_DEBUG,"P\n");
  1336. break;
  1337. case AV_PICTURE_TYPE_B:
  1338. av_log(s->avctx,AV_LOG_DEBUG,"B\n");
  1339. break;
  1340. case AV_PICTURE_TYPE_S:
  1341. av_log(s->avctx,AV_LOG_DEBUG,"S\n");
  1342. break;
  1343. case AV_PICTURE_TYPE_SI:
  1344. av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
  1345. break;
  1346. case AV_PICTURE_TYPE_SP:
  1347. av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
  1348. break;
  1349. }
  1350. for (y = 0; y < s->mb_height; y++) {
  1351. for (x = 0; x < s->mb_width; x++) {
  1352. if (s->avctx->debug & FF_DEBUG_SKIP) {
  1353. int count = s->mbskip_table[x + y * s->mb_stride];
  1354. if (count > 9)
  1355. count = 9;
  1356. av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
  1357. }
  1358. if (s->avctx->debug & FF_DEBUG_QP) {
  1359. av_log(s->avctx, AV_LOG_DEBUG, "%2d",
  1360. pict->qscale_table[x + y * s->mb_stride]);
  1361. }
  1362. if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
  1363. int mb_type = pict->mb_type[x + y * s->mb_stride];
  1364. // Type & MV direction
  1365. if (IS_PCM(mb_type))
  1366. av_log(s->avctx, AV_LOG_DEBUG, "P");
  1367. else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
  1368. av_log(s->avctx, AV_LOG_DEBUG, "A");
  1369. else if (IS_INTRA4x4(mb_type))
  1370. av_log(s->avctx, AV_LOG_DEBUG, "i");
  1371. else if (IS_INTRA16x16(mb_type))
  1372. av_log(s->avctx, AV_LOG_DEBUG, "I");
  1373. else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
  1374. av_log(s->avctx, AV_LOG_DEBUG, "d");
  1375. else if (IS_DIRECT(mb_type))
  1376. av_log(s->avctx, AV_LOG_DEBUG, "D");
  1377. else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
  1378. av_log(s->avctx, AV_LOG_DEBUG, "g");
  1379. else if (IS_GMC(mb_type))
  1380. av_log(s->avctx, AV_LOG_DEBUG, "G");
  1381. else if (IS_SKIP(mb_type))
  1382. av_log(s->avctx, AV_LOG_DEBUG, "S");
  1383. else if (!USES_LIST(mb_type, 1))
  1384. av_log(s->avctx, AV_LOG_DEBUG, ">");
  1385. else if (!USES_LIST(mb_type, 0))
  1386. av_log(s->avctx, AV_LOG_DEBUG, "<");
  1387. else {
  1388. assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1389. av_log(s->avctx, AV_LOG_DEBUG, "X");
  1390. }
  1391. // segmentation
  1392. if (IS_8X8(mb_type))
  1393. av_log(s->avctx, AV_LOG_DEBUG, "+");
  1394. else if (IS_16X8(mb_type))
  1395. av_log(s->avctx, AV_LOG_DEBUG, "-");
  1396. else if (IS_8X16(mb_type))
  1397. av_log(s->avctx, AV_LOG_DEBUG, "|");
  1398. else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
  1399. av_log(s->avctx, AV_LOG_DEBUG, " ");
  1400. else
  1401. av_log(s->avctx, AV_LOG_DEBUG, "?");
  1402. if (IS_INTERLACED(mb_type))
  1403. av_log(s->avctx, AV_LOG_DEBUG, "=");
  1404. else
  1405. av_log(s->avctx, AV_LOG_DEBUG, " ");
  1406. }
  1407. // av_log(s->avctx, AV_LOG_DEBUG, " ");
  1408. }
  1409. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  1410. }
  1411. }
  1412. if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
  1413. (s->avctx->debug_mv)) {
  1414. const int shift = 1 + s->quarter_sample;
  1415. int mb_y;
  1416. uint8_t *ptr;
  1417. int i;
  1418. int h_chroma_shift, v_chroma_shift, block_height;
  1419. const int width = s->avctx->width;
  1420. const int height = s->avctx->height;
  1421. const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
  1422. const int mv_stride = (s->mb_width << mv_sample_log2) +
  1423. (s->codec_id == CODEC_ID_H264 ? 0 : 1);
  1424. s->low_delay = 0; // needed to see the vectors without trashing the buffers
  1425. avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
  1426. &h_chroma_shift, &v_chroma_shift);
  1427. for (i = 0; i < 3; i++) {
  1428. memcpy(s->visualization_buffer[i], pict->data[i],
  1429. (i == 0) ? pict->linesize[i] * height:
  1430. pict->linesize[i] * height >> v_chroma_shift);
  1431. pict->data[i] = s->visualization_buffer[i];
  1432. }
  1433. pict->type = FF_BUFFER_TYPE_COPY;
  1434. ptr = pict->data[0];
  1435. block_height = 16 >> v_chroma_shift;
  1436. for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
  1437. int mb_x;
  1438. for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
  1439. const int mb_index = mb_x + mb_y * s->mb_stride;
  1440. if ((s->avctx->debug_mv) && pict->motion_val) {
  1441. int type;
  1442. for (type = 0; type < 3; type++) {
  1443. int direction = 0;
  1444. switch (type) {
  1445. case 0:
  1446. if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
  1447. (pict->pict_type!= AV_PICTURE_TYPE_P))
  1448. continue;
  1449. direction = 0;
  1450. break;
  1451. case 1:
  1452. if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
  1453. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1454. continue;
  1455. direction = 0;
  1456. break;
  1457. case 2:
  1458. if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
  1459. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1460. continue;
  1461. direction = 1;
  1462. break;
  1463. }
  1464. if (!USES_LIST(pict->mb_type[mb_index], direction))
  1465. continue;
  1466. if (IS_8X8(pict->mb_type[mb_index])) {
  1467. int i;
  1468. for (i = 0; i < 4; i++) {
  1469. int sx = mb_x * 16 + 4 + 8 * (i & 1);
  1470. int sy = mb_y * 16 + 4 + 8 * (i >> 1);
  1471. int xy = (mb_x * 2 + (i & 1) +
  1472. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1473. int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
  1474. int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
  1475. draw_arrow(ptr, sx, sy, mx, my, width,
  1476. height, s->linesize, 100);
  1477. }
  1478. } else if (IS_16X8(pict->mb_type[mb_index])) {
  1479. int i;
  1480. for (i = 0; i < 2; i++) {
  1481. int sx = mb_x * 16 + 8;
  1482. int sy = mb_y * 16 + 4 + 8 * i;
  1483. int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
  1484. int mx = (pict->motion_val[direction][xy][0] >> shift);
  1485. int my = (pict->motion_val[direction][xy][1] >> shift);
  1486. if (IS_INTERLACED(pict->mb_type[mb_index]))
  1487. my *= 2;
  1488. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1489. height, s->linesize, 100);
  1490. }
  1491. } else if (IS_8X16(pict->mb_type[mb_index])) {
  1492. int i;
  1493. for (i = 0; i < 2; i++) {
  1494. int sx = mb_x * 16 + 4 + 8 * i;
  1495. int sy = mb_y * 16 + 8;
  1496. int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
  1497. int mx = pict->motion_val[direction][xy][0] >> shift;
  1498. int my = pict->motion_val[direction][xy][1] >> shift;
  1499. if (IS_INTERLACED(pict->mb_type[mb_index]))
  1500. my *= 2;
  1501. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1502. height, s->linesize, 100);
  1503. }
  1504. } else {
  1505. int sx = mb_x * 16 + 8;
  1506. int sy = mb_y * 16 + 8;
  1507. int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1508. int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1509. int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
  1510. draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
  1511. }
  1512. }
  1513. }
  1514. if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
  1515. uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
  1516. 0x0101010101010101ULL;
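/* Informal note: qscale (1..31) is mapped onto 4..128 and replicated into
 * every byte of a 64-bit word, so one store below paints 8 chroma samples;
 * e.g. qscale 31 gives 31 * 128 / 31 = 128 and c = 0x8080808080808080ULL. */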
  1517. int y;
  1518. for (y = 0; y < block_height; y++) {
  1519. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1520. (block_height * mb_y + y) *
  1521. pict->linesize[1]) = c;
  1522. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1523. (block_height * mb_y + y) *
  1524. pict->linesize[2]) = c;
  1525. }
  1526. }
  1527. if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
  1528. pict->motion_val) {
  1529. int mb_type = pict->mb_type[mb_index];
  1530. uint64_t u,v;
  1531. int y;
  1532. #define COLOR(theta, r) \
  1533. u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
  1534. v = (int)(128 + r * sin(theta * 3.141592 / 180));
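/* Illustrative only: COLOR() places a hue on a circle around neutral chroma
 * (128, 128); e.g. COLOR(30, 48), used for intra16x16 below, works out to
 * roughly u = 128 + 48*cos(30 deg) = 169 and v = 128 + 48*sin(30 deg) = 152,
 * so each macroblock type gets its own tint while luma stays untouched. */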
  1535. u = v = 128;
  1536. if (IS_PCM(mb_type)) {
  1537. COLOR(120, 48)
  1538. } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
  1539. IS_INTRA16x16(mb_type)) {
  1540. COLOR(30, 48)
  1541. } else if (IS_INTRA4x4(mb_type)) {
  1542. COLOR(90, 48)
  1543. } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
  1544. // COLOR(120, 48)
  1545. } else if (IS_DIRECT(mb_type)) {
  1546. COLOR(150, 48)
  1547. } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
  1548. COLOR(170, 48)
  1549. } else if (IS_GMC(mb_type)) {
  1550. COLOR(190, 48)
  1551. } else if (IS_SKIP(mb_type)) {
  1552. // COLOR(180, 48)
  1553. } else if (!USES_LIST(mb_type, 1)) {
  1554. COLOR(240, 48)
  1555. } else if (!USES_LIST(mb_type, 0)) {
  1556. COLOR(0, 48)
  1557. } else {
  1558. assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1559. COLOR(300,48)
  1560. }
  1561. u *= 0x0101010101010101ULL;
  1562. v *= 0x0101010101010101ULL;
  1563. for (y = 0; y < block_height; y++) {
  1564. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1565. (block_height * mb_y + y) * pict->linesize[1]) = u;
  1566. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1567. (block_height * mb_y + y) * pict->linesize[2]) = v;
  1568. }
  1569. // segmentation
  1570. if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
  1571. *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
  1572. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1573. *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
  1574. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1575. }
  1576. if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
  1577. for (y = 0; y < 16; y++)
  1578. pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
  1579. pict->linesize[0]] ^= 0x80;
  1580. }
  1581. if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
  1582. int dm = 1 << (mv_sample_log2 - 2);
  1583. for (i = 0; i < 4; i++) {
  1584. int sx = mb_x * 16 + 8 * (i & 1);
  1585. int sy = mb_y * 16 + 8 * (i >> 1);
  1586. int xy = (mb_x * 2 + (i & 1) +
  1587. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1588. // FIXME bidir
  1589. int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
  1590. if (mv[0] != mv[dm] ||
  1591. mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
  1592. for (y = 0; y < 8; y++)
  1593. pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
  1594. if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
  1595. *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
  1596. pict->linesize[0]) ^= 0x8080808080808080ULL;
  1597. }
  1598. }
  1599. if (IS_INTERLACED(mb_type) &&
  1600. s->codec_id == CODEC_ID_H264) {
  1601. // hmm
  1602. }
  1603. }
  1604. s->mbskip_table[mb_index] = 0;
  1605. }
  1606. }
  1607. }
  1608. }
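/*
 * A rough sketch of the lowres sub-pel bookkeeping shared by the functions
 * below (illustrative values only, not part of the build): at lowres = 1 a
 * half-pel luma vector carries lowres + 1 = 2 fractional bits, so
 *
 *     s_mask = (2 << 1) - 1 = 3;    // fractional part:  sx = motion_x & 3
 *     src_x += motion_x >> 2;       // integer part, in lowres samples
 *     sx     = (sx << 2) >> 1;      // rescale the fraction to the 1/8-pel
 *                                   // weight the h264_chroma_mc helpers take
 *
 * e.g. motion_x = 5 gives sx = 1, one extra source sample and a final
 * interpolation weight of 2.
 */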
  1609. static inline int hpel_motion_lowres(MpegEncContext *s,
  1610. uint8_t *dest, uint8_t *src,
  1611. int field_based, int field_select,
  1612. int src_x, int src_y,
  1613. int width, int height, int stride,
  1614. int h_edge_pos, int v_edge_pos,
  1615. int w, int h, h264_chroma_mc_func *pix_op,
  1616. int motion_x, int motion_y)
  1617. {
  1618. const int lowres = s->avctx->lowres;
  1619. const int op_index = FFMIN(lowres, 2);
  1620. const int s_mask = (2 << lowres) - 1;
  1621. int emu = 0;
  1622. int sx, sy;
  1623. if (s->quarter_sample) {
  1624. motion_x /= 2;
  1625. motion_y /= 2;
  1626. }
  1627. sx = motion_x & s_mask;
  1628. sy = motion_y & s_mask;
  1629. src_x += motion_x >> lowres + 1;
  1630. src_y += motion_y >> lowres + 1;
  1631. src += src_y * stride + src_x;
  1632. if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
  1633. (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  1634. s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
  1635. (h + 1) << field_based, src_x,
  1636. src_y << field_based,
  1637. h_edge_pos,
  1638. v_edge_pos);
  1639. src = s->edge_emu_buffer;
  1640. emu = 1;
  1641. }
  1642. sx = (sx << 2) >> lowres;
  1643. sy = (sy << 2) >> lowres;
  1644. if (field_select)
  1645. src += s->linesize;
  1646. pix_op[op_index](dest, src, stride, h, sx, sy);
  1647. return emu;
  1648. }
  1649. /* apply one mpeg motion vector to the three components */
  1650. static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
  1651. uint8_t *dest_y,
  1652. uint8_t *dest_cb,
  1653. uint8_t *dest_cr,
  1654. int field_based,
  1655. int bottom_field,
  1656. int field_select,
  1657. uint8_t **ref_picture,
  1658. h264_chroma_mc_func *pix_op,
  1659. int motion_x, int motion_y,
  1660. int h, int mb_y)
  1661. {
  1662. uint8_t *ptr_y, *ptr_cb, *ptr_cr;
  1663. int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
  1664. uvsx, uvsy;
  1665. const int lowres = s->avctx->lowres;
  1666. const int op_index = FFMIN(lowres, 2);
  1667. const int block_s = 8>>lowres;
  1668. const int s_mask = (2 << lowres) - 1;
  1669. const int h_edge_pos = s->h_edge_pos >> lowres;
  1670. const int v_edge_pos = s->v_edge_pos >> lowres;
  1671. linesize = s->current_picture.f.linesize[0] << field_based;
  1672. uvlinesize = s->current_picture.f.linesize[1] << field_based;
  1673. // FIXME obviously not perfect but qpel will not work in lowres anyway
  1674. if (s->quarter_sample) {
  1675. motion_x /= 2;
  1676. motion_y /= 2;
  1677. }
  1678. if (field_based) {
  1679. motion_y += (bottom_field - field_select) * (1 << lowres - 1);
  1680. }
  1681. sx = motion_x & s_mask;
  1682. sy = motion_y & s_mask;
  1683. src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
  1684. src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
  1685. if (s->out_format == FMT_H263) {
  1686. uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
  1687. uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
  1688. uvsrc_x = src_x >> 1;
  1689. uvsrc_y = src_y >> 1;
  1690. } else if (s->out_format == FMT_H261) {
  1691. // even chroma mv's are full pel in H261
  1692. mx = motion_x / 4;
  1693. my = motion_y / 4;
  1694. uvsx = (2 * mx) & s_mask;
  1695. uvsy = (2 * my) & s_mask;
  1696. uvsrc_x = s->mb_x * block_s + (mx >> lowres);
  1697. uvsrc_y = mb_y * block_s + (my >> lowres);
  1698. } else {
  1699. mx = motion_x / 2;
  1700. my = motion_y / 2;
  1701. uvsx = mx & s_mask;
  1702. uvsy = my & s_mask;
  1703. uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
  1704. uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
  1705. }
  1706. ptr_y = ref_picture[0] + src_y * linesize + src_x;
  1707. ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
  1708. ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
  1709. if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
  1710. (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  1711. s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
  1712. s->linesize, 17, 17 + field_based,
  1713. src_x, src_y << field_based, h_edge_pos,
  1714. v_edge_pos);
  1715. ptr_y = s->edge_emu_buffer;
  1716. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  1717. uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
  1718. s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
  1719. 9 + field_based,
  1720. uvsrc_x, uvsrc_y << field_based,
  1721. h_edge_pos >> 1, v_edge_pos >> 1);
  1722. s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
  1723. 9 + field_based,
  1724. uvsrc_x, uvsrc_y << field_based,
  1725. h_edge_pos >> 1, v_edge_pos >> 1);
  1726. ptr_cb = uvbuf;
  1727. ptr_cr = uvbuf + 16;
  1728. }
  1729. }
  1730. // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
  1731. if (bottom_field) {
  1732. dest_y += s->linesize;
  1733. dest_cb += s->uvlinesize;
  1734. dest_cr += s->uvlinesize;
  1735. }
  1736. if (field_select) {
  1737. ptr_y += s->linesize;
  1738. ptr_cb += s->uvlinesize;
  1739. ptr_cr += s->uvlinesize;
  1740. }
  1741. sx = (sx << 2) >> lowres;
  1742. sy = (sy << 2) >> lowres;
  1743. pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
  1744. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  1745. uvsx = (uvsx << 2) >> lowres;
  1746. uvsy = (uvsy << 2) >> lowres;
  1747. pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
  1748. uvsx, uvsy);
  1749. pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
  1750. uvsx, uvsy);
  1751. }
  1752. // FIXME h261 lowres loop filter
  1753. }
  1754. static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
  1755. uint8_t *dest_cb, uint8_t *dest_cr,
  1756. uint8_t **ref_picture,
  1757. h264_chroma_mc_func * pix_op,
  1758. int mx, int my)
  1759. {
  1760. const int lowres = s->avctx->lowres;
  1761. const int op_index = FFMIN(lowres, 2);
  1762. const int block_s = 8 >> lowres;
  1763. const int s_mask = (2 << lowres) - 1;
  1764. const int h_edge_pos = s->h_edge_pos >> lowres + 1;
  1765. const int v_edge_pos = s->v_edge_pos >> lowres + 1;
  1766. int emu = 0, src_x, src_y, offset, sx, sy;
  1767. uint8_t *ptr;
  1768. if (s->quarter_sample) {
  1769. mx /= 2;
  1770. my /= 2;
  1771. }
  1772. /* In case of 8X8, we construct a single chroma motion vector
  1773. with a special rounding */
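/* mx and my arrive here as the sum of the four 8x8 luma vectors (see the
 * MV_TYPE_8X8 case in MPV_motion_lowres below); ff_h263_round_chroma() folds
 * that sum into one chroma vector, roughly sum/8 (the average of the four
 * vectors halved again for the 2:1 subsampled chroma plane), using H.263's
 * rounding rule rather than plain truncation. Informal summary only. */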
  1774. mx = ff_h263_round_chroma(mx);
  1775. my = ff_h263_round_chroma(my);
  1776. sx = mx & s_mask;
  1777. sy = my & s_mask;
  1778. src_x = s->mb_x * block_s + (mx >> lowres + 1);
  1779. src_y = s->mb_y * block_s + (my >> lowres + 1);
  1780. offset = src_y * s->uvlinesize + src_x;
  1781. ptr = ref_picture[1] + offset;
  1782. if (s->flags & CODEC_FLAG_EMU_EDGE) {
  1783. if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
  1784. (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
  1785. s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
  1786. 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
  1787. ptr = s->edge_emu_buffer;
  1788. emu = 1;
  1789. }
  1790. }
  1791. sx = (sx << 2) >> lowres;
  1792. sy = (sy << 2) >> lowres;
  1793. pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
  1794. ptr = ref_picture[2] + offset;
  1795. if (emu) {
  1796. s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
  1797. src_x, src_y, h_edge_pos, v_edge_pos);
  1798. ptr = s->edge_emu_buffer;
  1799. }
  1800. pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
  1801. }
  1802. /**
  1803. * motion compensation of a single macroblock
  1804. * @param s context
  1805. * @param dest_y luma destination pointer
  1806. * @param dest_cb chroma cb/u destination pointer
  1807. * @param dest_cr chroma cr/v destination pointer
  1808. * @param dir direction (0->forward, 1->backward)
  1809. * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
  1810. * @param pix_op halfpel motion compensation function (average or put normally)
  1811. * the motion vectors are taken from s->mv and the MV type from s->mv_type
  1812. */
  1813. static inline void MPV_motion_lowres(MpegEncContext *s,
  1814. uint8_t *dest_y, uint8_t *dest_cb,
  1815. uint8_t *dest_cr,
  1816. int dir, uint8_t **ref_picture,
  1817. h264_chroma_mc_func *pix_op)
  1818. {
  1819. int mx, my;
  1820. int mb_x, mb_y, i;
  1821. const int lowres = s->avctx->lowres;
  1822. const int block_s = 8 >>lowres;
  1823. mb_x = s->mb_x;
  1824. mb_y = s->mb_y;
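/* Informal overview of the dispatch below: MV_TYPE_16X16 issues a single
 * call covering the whole macroblock, MV_TYPE_8X8 does four quarter-size
 * luma calls plus one combined chroma call, and the FIELD / 16X8 / DMV
 * cases call mpeg_motion_lowres once per field or per 16x8 half, switching
 * to the averaging chroma op for the second (dual prime) pass. */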
  1825. switch (s->mv_type) {
  1826. case MV_TYPE_16X16:
  1827. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1828. 0, 0, 0,
  1829. ref_picture, pix_op,
  1830. s->mv[dir][0][0], s->mv[dir][0][1],
  1831. 2 * block_s, mb_y);
  1832. break;
  1833. case MV_TYPE_8X8:
  1834. mx = 0;
  1835. my = 0;
  1836. for (i = 0; i < 4; i++) {
  1837. hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
  1838. s->linesize) * block_s,
  1839. ref_picture[0], 0, 0,
  1840. (2 * mb_x + (i & 1)) * block_s,
  1841. (2 * mb_y + (i >> 1)) * block_s,
  1842. s->width, s->height, s->linesize,
  1843. s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
  1844. block_s, block_s, pix_op,
  1845. s->mv[dir][i][0], s->mv[dir][i][1]);
  1846. mx += s->mv[dir][i][0];
  1847. my += s->mv[dir][i][1];
  1848. }
  1849. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
  1850. chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
  1851. pix_op, mx, my);
  1852. break;
  1853. case MV_TYPE_FIELD:
  1854. if (s->picture_structure == PICT_FRAME) {
  1855. /* top field */
  1856. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1857. 1, 0, s->field_select[dir][0],
  1858. ref_picture, pix_op,
  1859. s->mv[dir][0][0], s->mv[dir][0][1],
  1860. block_s, mb_y);
  1861. /* bottom field */
  1862. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1863. 1, 1, s->field_select[dir][1],
  1864. ref_picture, pix_op,
  1865. s->mv[dir][1][0], s->mv[dir][1][1],
  1866. block_s, mb_y);
  1867. } else {
  1868. if (s->picture_structure != s->field_select[dir][0] + 1 &&
  1869. s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
  1870. ref_picture = s->current_picture_ptr->f.data;
  1871. }
  1872. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1873. 0, 0, s->field_select[dir][0],
  1874. ref_picture, pix_op,
  1875. s->mv[dir][0][0],
  1876. s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
  1877. }
  1878. break;
  1879. case MV_TYPE_16X8:
  1880. for (i = 0; i < 2; i++) {
  1881. uint8_t **ref2picture;
  1882. if (s->picture_structure == s->field_select[dir][i] + 1 ||
  1883. s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
  1884. ref2picture = ref_picture;
  1885. } else {
  1886. ref2picture = s->current_picture_ptr->f.data;
  1887. }
  1888. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1889. 0, 0, s->field_select[dir][i],
  1890. ref2picture, pix_op,
  1891. s->mv[dir][i][0], s->mv[dir][i][1] +
  1892. 2 * block_s * i, block_s, mb_y >> 1);
  1893. dest_y += 2 * block_s * s->linesize;
  1894. dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  1895. dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  1896. }
  1897. break;
  1898. case MV_TYPE_DMV:
  1899. if (s->picture_structure == PICT_FRAME) {
  1900. for (i = 0; i < 2; i++) {
  1901. int j;
  1902. for (j = 0; j < 2; j++) {
  1903. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1904. 1, j, j ^ i,
  1905. ref_picture, pix_op,
  1906. s->mv[dir][2 * i + j][0],
  1907. s->mv[dir][2 * i + j][1],
  1908. block_s, mb_y);
  1909. }
  1910. pix_op = s->dsp.avg_h264_chroma_pixels_tab;
  1911. }
  1912. } else {
  1913. for (i = 0; i < 2; i++) {
  1914. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1915. 0, 0, s->picture_structure != i + 1,
  1916. ref_picture, pix_op,
  1917. s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
  1918. 2 * block_s, mb_y >> 1);
  1919. // after put we make avg of the same block
  1920. pix_op = s->dsp.avg_h264_chroma_pixels_tab;
  1921. // opposite parity is always in the same
  1922. // frame if this is second field
  1923. if (!s->first_field) {
  1924. ref_picture = s->current_picture_ptr->f.data;
  1925. }
  1926. }
  1927. }
  1928. break;
  1929. default:
  1930. assert(0);
  1931. }
  1932. }
  1933. /**
  1934. * find the lowest MB row referenced in the MVs
  1935. */
  1936. int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
  1937. {
  1938. int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
  1939. int my, off, i, mvs;
  1940. if (s->picture_structure != PICT_FRAME) goto unhandled;
  1941. switch (s->mv_type) {
  1942. case MV_TYPE_16X16:
  1943. mvs = 1;
  1944. break;
  1945. case MV_TYPE_16X8:
  1946. mvs = 2;
  1947. break;
  1948. case MV_TYPE_8X8:
  1949. mvs = 4;
  1950. break;
  1951. default:
  1952. goto unhandled;
  1953. }
  1954. for (i = 0; i < mvs; i++) {
  1955. my = s->mv[dir][i][1]<<qpel_shift;
  1956. my_max = FFMAX(my_max, my);
  1957. my_min = FFMIN(my_min, my);
  1958. }
  1959. off = (FFMAX(-my_min, my_max) + 63) >> 6;
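/* Illustrative numbers: the vectors were brought to quarter-pel above, so 64
 * units correspond to one 16-pixel MB row; a largest vertical vector of 40
 * quarter-pel (10 pixels) gives off = (40 + 63) >> 6 = 1, i.e. frame
 * threading has to wait until reference row mb_y + 1 has been decoded. */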
  1960. return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
  1961. unhandled:
  1962. return s->mb_height-1;
  1963. }
  1964. /* put block[] to dest[] */
  1965. static inline void put_dct(MpegEncContext *s,
  1966. DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
  1967. {
  1968. s->dct_unquantize_intra(s, block, i, qscale);
  1969. s->dsp.idct_put (dest, line_size, block);
  1970. }
  1971. /* add block[] to dest[] */
  1972. static inline void add_dct(MpegEncContext *s,
  1973. DCTELEM *block, int i, uint8_t *dest, int line_size)
  1974. {
  1975. if (s->block_last_index[i] >= 0) {
  1976. s->dsp.idct_add (dest, line_size, block);
  1977. }
  1978. }
  1979. static inline void add_dequant_dct(MpegEncContext *s,
  1980. DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
  1981. {
  1982. if (s->block_last_index[i] >= 0) {
  1983. s->dct_unquantize_inter(s, block, i, qscale);
  1984. s->dsp.idct_add (dest, line_size, block);
  1985. }
  1986. }
  1987. /**
  1988. * Clean dc, ac, coded_block for the current non-intra MB.
  1989. */
  1990. void ff_clean_intra_table_entries(MpegEncContext *s)
  1991. {
  1992. int wrap = s->b8_stride;
  1993. int xy = s->block_index[0];
  1994. s->dc_val[0][xy ] =
  1995. s->dc_val[0][xy + 1 ] =
  1996. s->dc_val[0][xy + wrap] =
  1997. s->dc_val[0][xy + 1 + wrap] = 1024;
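/* 1024 is the "no prediction" DC value: with the usual DC scale of 8 it is
 * the DC of a flat mid-grey block (128 * 8), so predicting from a reset
 * neighbour is neutral. Informal reasoning; the exact scale comes from the
 * codec's dc_scale_table. */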
  1998. /* ac pred */
  1999. memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
  2000. memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
  2001. if (s->msmpeg4_version>=3) {
  2002. s->coded_block[xy ] =
  2003. s->coded_block[xy + 1 ] =
  2004. s->coded_block[xy + wrap] =
  2005. s->coded_block[xy + 1 + wrap] = 0;
  2006. }
  2007. /* chroma */
  2008. wrap = s->mb_stride;
  2009. xy = s->mb_x + s->mb_y * wrap;
  2010. s->dc_val[1][xy] =
  2011. s->dc_val[2][xy] = 1024;
  2012. /* ac pred */
  2013. memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
  2014. memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
  2015. s->mbintra_table[xy]= 0;
  2016. }
  2017. /* generic function called after a macroblock has been parsed by the
  2018. decoder or after it has been encoded by the encoder.
  2019. Important variables used:
  2020. s->mb_intra : true if intra macroblock
  2021. s->mv_dir : motion vector direction
  2022. s->mv_type : motion vector type
  2023. s->mv : motion vector
  2024. s->interlaced_dct : true if interlaced dct used (mpeg2)
  2025. */
  2026. static av_always_inline
  2027. void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
  2028. int lowres_flag, int is_mpeg12)
  2029. {
  2030. const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
  2031. if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
  2032. ff_xvmc_decode_mb(s);//xvmc uses pblocks
  2033. return;
  2034. }
  2035. if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  2036. /* save DCT coefficients */
  2037. int i,j;
  2038. DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
  2039. av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
  2040. for(i=0; i<6; i++){
  2041. for(j=0; j<64; j++){
  2042. *dct++ = block[i][s->dsp.idct_permutation[j]];
  2043. av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
  2044. }
  2045. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  2046. }
  2047. }
  2048. s->current_picture.f.qscale_table[mb_xy] = s->qscale;
  2049. /* update DC predictors for P macroblocks */
  2050. if (!s->mb_intra) {
  2051. if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
  2052. if(s->mbintra_table[mb_xy])
  2053. ff_clean_intra_table_entries(s);
  2054. } else {
  2055. s->last_dc[0] =
  2056. s->last_dc[1] =
  2057. s->last_dc[2] = 128 << s->intra_dc_precision;
  2058. }
  2059. }
  2060. else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
  2061. s->mbintra_table[mb_xy]=1;
  2062. if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
  2063. uint8_t *dest_y, *dest_cb, *dest_cr;
  2064. int dct_linesize, dct_offset;
  2065. op_pixels_func (*op_pix)[4];
  2066. qpel_mc_func (*op_qpix)[16];
  2067. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  2068. const int uvlinesize = s->current_picture.f.linesize[1];
  2069. const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
  2070. const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
  2071. /* avoid copy if macroblock skipped in last frame too */
  2072. /* skip only during decoding as we might trash the buffers during encoding a bit */
  2073. if(!s->encoding){
  2074. uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
  2075. if (s->mb_skipped) {
  2076. s->mb_skipped= 0;
  2077. assert(s->pict_type!=AV_PICTURE_TYPE_I);
  2078. *mbskip_ptr = 1;
  2079. } else if(!s->current_picture.f.reference) {
  2080. *mbskip_ptr = 1;
  2081. } else{
  2082. *mbskip_ptr = 0; /* not skipped */
  2083. }
  2084. }
  2085. dct_linesize = linesize << s->interlaced_dct;
  2086. dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
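/* Illustrative only: for a progressive luma MB the lower pair of 8x8 blocks
 * starts block_size rows further down (dct_offset = 8 * linesize at full
 * resolution), while with interlaced DCT each block holds one field, so the
 * "lower" blocks start on the very next line and every block is written
 * with a doubled stride (dct_linesize = 2 * linesize). */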
  2087. if(readable){
  2088. dest_y= s->dest[0];
  2089. dest_cb= s->dest[1];
  2090. dest_cr= s->dest[2];
  2091. }else{
  2092. dest_y = s->b_scratchpad;
  2093. dest_cb= s->b_scratchpad+16*linesize;
  2094. dest_cr= s->b_scratchpad+32*linesize;
  2095. }
  2096. if (!s->mb_intra) {
  2097. /* motion handling */
  2098. /* decoding or more than one mb_type (MC was already done otherwise) */
  2099. if(!s->encoding){
  2100. if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
  2101. if (s->mv_dir & MV_DIR_FORWARD) {
  2102. ff_thread_await_progress((AVFrame*)s->last_picture_ptr, ff_MPV_lowest_referenced_row(s, 0), 0);
  2103. }
  2104. if (s->mv_dir & MV_DIR_BACKWARD) {
  2105. ff_thread_await_progress((AVFrame*)s->next_picture_ptr, ff_MPV_lowest_referenced_row(s, 1), 0);
  2106. }
  2107. }
  2108. if(lowres_flag){
  2109. h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
  2110. if (s->mv_dir & MV_DIR_FORWARD) {
  2111. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
  2112. op_pix = s->dsp.avg_h264_chroma_pixels_tab;
  2113. }
  2114. if (s->mv_dir & MV_DIR_BACKWARD) {
  2115. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
  2116. }
  2117. }else{
  2118. op_qpix= s->me.qpel_put;
  2119. if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
  2120. op_pix = s->dsp.put_pixels_tab;
  2121. }else{
  2122. op_pix = s->dsp.put_no_rnd_pixels_tab;
  2123. }
  2124. if (s->mv_dir & MV_DIR_FORWARD) {
  2125. MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
  2126. op_pix = s->dsp.avg_pixels_tab;
  2127. op_qpix= s->me.qpel_avg;
  2128. }
  2129. if (s->mv_dir & MV_DIR_BACKWARD) {
  2130. MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
  2131. }
  2132. }
  2133. }
  2134. /* skip dequant / idct if we are really late ;) */
  2135. if(s->avctx->skip_idct){
  2136. if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
  2137. ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
  2138. || s->avctx->skip_idct >= AVDISCARD_ALL)
  2139. goto skip_idct;
  2140. }
  2141. /* add dct residue */
  2142. if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
  2143. || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
  2144. add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2145. add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2146. add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2147. add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2148. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2149. if (s->chroma_y_shift){
  2150. add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2151. add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2152. }else{
  2153. dct_linesize >>= 1;
  2154. dct_offset >>=1;
  2155. add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2156. add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2157. add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2158. add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2159. }
  2160. }
  2161. } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
  2162. add_dct(s, block[0], 0, dest_y , dct_linesize);
  2163. add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
  2164. add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
  2165. add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
  2166. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2167. if(s->chroma_y_shift){//Chroma420
  2168. add_dct(s, block[4], 4, dest_cb, uvlinesize);
  2169. add_dct(s, block[5], 5, dest_cr, uvlinesize);
  2170. }else{
  2171. //chroma422
  2172. dct_linesize = uvlinesize << s->interlaced_dct;
  2173. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
  2174. add_dct(s, block[4], 4, dest_cb, dct_linesize);
  2175. add_dct(s, block[5], 5, dest_cr, dct_linesize);
  2176. add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
  2177. add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
  2178. if(!s->chroma_x_shift){//Chroma444
  2179. add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
  2180. add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
  2181. add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
  2182. add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
  2183. }
  2184. }
  2185. }//fi gray
  2186. }
  2187. else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
  2188. ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
  2189. }
  2190. } else {
  2191. /* dct only in intra block */
  2192. if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
  2193. put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2194. put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2195. put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2196. put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2197. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2198. if(s->chroma_y_shift){
  2199. put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2200. put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2201. }else{
  2202. dct_offset >>=1;
  2203. dct_linesize >>=1;
  2204. put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2205. put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2206. put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2207. put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2208. }
  2209. }
  2210. }else{
  2211. s->dsp.idct_put(dest_y , dct_linesize, block[0]);
  2212. s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
  2213. s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
  2214. s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
  2215. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2216. if(s->chroma_y_shift){
  2217. s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
  2218. s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
  2219. }else{
  2220. dct_linesize = uvlinesize << s->interlaced_dct;
  2221. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
  2222. s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
  2223. s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
  2224. s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
  2225. s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
  2226. if(!s->chroma_x_shift){//Chroma444
  2227. s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
  2228. s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
  2229. s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
  2230. s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
  2231. }
  2232. }
  2233. }//gray
  2234. }
  2235. }
  2236. skip_idct:
  2237. if(!readable){
  2238. s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
  2239. s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
  2240. s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
  2241. }
  2242. }
  2243. }
  2244. void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
  2245. #if !CONFIG_SMALL
  2246. if(s->out_format == FMT_MPEG1) {
  2247. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
  2248. else MPV_decode_mb_internal(s, block, 0, 1);
  2249. } else
  2250. #endif
  2251. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
  2252. else MPV_decode_mb_internal(s, block, 0, 0);
  2253. }
  2254. /**
2255. * @param h is the normal height; it will be reduced automatically if needed for the last row
  2256. */
  2257. void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
  2258. const int field_pic= s->picture_structure != PICT_FRAME;
  2259. if(field_pic){
  2260. h <<= 1;
  2261. y <<= 1;
  2262. }
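/* Informal note: for field pictures y and h arrive in field lines while the
 * picture planes hold both fields interleaved at frame size, hence the
 * doubling above; the last row is clamped against avctx->height further
 * down. */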
  2263. if (!s->avctx->hwaccel
  2264. && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
  2265. && s->unrestricted_mv
  2266. && s->current_picture.f.reference
  2267. && !s->intra_only
  2268. && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
  2269. int sides = 0, edge_h;
  2270. int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
  2271. int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
  2272. if (y==0) sides |= EDGE_TOP;
  2273. if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
  2274. edge_h= FFMIN(h, s->v_edge_pos - y);
  2275. s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
  2276. s->linesize, s->h_edge_pos, edge_h,
  2277. EDGE_WIDTH, EDGE_WIDTH, sides);
  2278. s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
  2279. s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
  2280. EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
  2281. s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
  2282. s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
  2283. EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
  2284. }
  2285. h= FFMIN(h, s->avctx->height - y);
  2286. if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
  2287. if (s->avctx->draw_horiz_band) {
  2288. AVFrame *src;
  2289. int offset[AV_NUM_DATA_POINTERS];
  2290. int i;
  2291. if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
  2292. src= (AVFrame*)s->current_picture_ptr;
  2293. else if(s->last_picture_ptr)
  2294. src= (AVFrame*)s->last_picture_ptr;
  2295. else
  2296. return;
  2297. if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
  2298. for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
  2299. offset[i] = 0;
  2300. }else{
  2301. offset[0]= y * s->linesize;
  2302. offset[1]=
  2303. offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
  2304. for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
  2305. offset[i] = 0;
  2306. }
  2307. emms_c();
  2308. s->avctx->draw_horiz_band(s->avctx, src, offset,
  2309. y, s->picture_structure, h);
  2310. }
  2311. }
  2312. void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
  2313. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  2314. const int uvlinesize = s->current_picture.f.linesize[1];
  2315. const int mb_size= 4 - s->avctx->lowres;
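/* mb_size is the log2 of the macroblock width at the current lowres level:
 * 4 (16-pixel MBs) at full resolution, 3 at lowres 1, 2 at lowres 2, so the
 * "<< mb_size" address arithmetic below scales automatically. */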
  2316. s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
  2317. s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
  2318. s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
  2319. s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
  2320. s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2321. s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2322. //block_index is not used by mpeg2, so it is not affected by chroma_format
  2323. s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
  2324. s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2325. s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2326. if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
  2327. {
  2328. if(s->picture_structure==PICT_FRAME){
  2329. s->dest[0] += s->mb_y * linesize << mb_size;
  2330. s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2331. s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2332. }else{
  2333. s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
  2334. s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2335. s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2336. assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
  2337. }
  2338. }
  2339. }
  2340. void ff_mpeg_flush(AVCodecContext *avctx){
  2341. int i;
  2342. MpegEncContext *s = avctx->priv_data;
  2343. if(s==NULL || s->picture==NULL)
  2344. return;
  2345. for(i=0; i<s->picture_count; i++){
  2346. if (s->picture[i].f.data[0] &&
  2347. (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
  2348. s->picture[i].f.type == FF_BUFFER_TYPE_USER))
  2349. free_frame_buffer(s, &s->picture[i]);
  2350. }
  2351. s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
  2352. s->mb_x= s->mb_y= 0;
  2353. s->parse_context.state= -1;
  2354. s->parse_context.frame_start_found= 0;
  2355. s->parse_context.overread= 0;
  2356. s->parse_context.overread_index= 0;
  2357. s->parse_context.index= 0;
  2358. s->parse_context.last_index= 0;
  2359. s->bitstream_buffer_size=0;
  2360. s->pp_time=0;
  2361. }
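/* The two MPEG-1 dequantizers below apply
 *   intra:  level' = (level * qscale * matrix[j]) >> 3
 *   inter:  level' = ((2 * level + 1) * qscale * matrix[j]) >> 4
 * followed by (level' - 1) | 1, MPEG-1's mismatch control that forces every
 * nonzero coefficient odd. A minimal standalone sketch of the intra rule
 * (kept under #if 0, illustrative only; the name is local to this sketch):
 */
#if 0
/* positive levels only for brevity; the real code below mirrors the sign */
static int mpeg1_intra_dequant_one(int level, int qscale, int matrix_coeff)
{
    int v = (level * qscale * matrix_coeff) >> 3; /* scale, drop 3 fraction bits */
    return (v - 1) | 1; /* force odd: level 2, qscale 4, matrix 16 -> 16 -> 15 */
}
#endif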
  2362. static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
  2363. DCTELEM *block, int n, int qscale)
  2364. {
  2365. int i, level, nCoeffs;
  2366. const uint16_t *quant_matrix;
  2367. nCoeffs= s->block_last_index[n];
  2368. if (n < 4)
  2369. block[0] = block[0] * s->y_dc_scale;
  2370. else
  2371. block[0] = block[0] * s->c_dc_scale;
  2372. /* XXX: only mpeg1 */
  2373. quant_matrix = s->intra_matrix;
  2374. for(i=1;i<=nCoeffs;i++) {
  2375. int j= s->intra_scantable.permutated[i];
  2376. level = block[j];
  2377. if (level) {
  2378. if (level < 0) {
  2379. level = -level;
  2380. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2381. level = (level - 1) | 1;
  2382. level = -level;
  2383. } else {
  2384. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2385. level = (level - 1) | 1;
  2386. }
  2387. block[j] = level;
  2388. }
  2389. }
  2390. }
  2391. static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
  2392. DCTELEM *block, int n, int qscale)
  2393. {
  2394. int i, level, nCoeffs;
  2395. const uint16_t *quant_matrix;
  2396. nCoeffs= s->block_last_index[n];
  2397. quant_matrix = s->inter_matrix;
  2398. for(i=0; i<=nCoeffs; i++) {
  2399. int j= s->intra_scantable.permutated[i];
  2400. level = block[j];
  2401. if (level) {
  2402. if (level < 0) {
  2403. level = -level;
  2404. level = (((level << 1) + 1) * qscale *
  2405. ((int) (quant_matrix[j]))) >> 4;
  2406. level = (level - 1) | 1;
  2407. level = -level;
  2408. } else {
  2409. level = (((level << 1) + 1) * qscale *
  2410. ((int) (quant_matrix[j]))) >> 4;
  2411. level = (level - 1) | 1;
  2412. }
  2413. block[j] = level;
  2414. }
  2415. }
  2416. }
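/* The MPEG-2 dequantizers below drop the per-coefficient oddification; the
 * bitexact intra variant and the inter variant instead implement MPEG-2's
 * IDCT mismatch control: the dequantized levels are summed (sum starts at
 * -1) and "block[63] ^= sum & 1" toggles the LSB of the last coefficient so
 * the overall parity comes out as the spec requires. */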
  2417. static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
  2418. DCTELEM *block, int n, int qscale)
  2419. {
  2420. int i, level, nCoeffs;
  2421. const uint16_t *quant_matrix;
  2422. if(s->alternate_scan) nCoeffs= 63;
  2423. else nCoeffs= s->block_last_index[n];
  2424. if (n < 4)
  2425. block[0] = block[0] * s->y_dc_scale;
  2426. else
  2427. block[0] = block[0] * s->c_dc_scale;
  2428. quant_matrix = s->intra_matrix;
  2429. for(i=1;i<=nCoeffs;i++) {
  2430. int j= s->intra_scantable.permutated[i];
  2431. level = block[j];
  2432. if (level) {
  2433. if (level < 0) {
  2434. level = -level;
  2435. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2436. level = -level;
  2437. } else {
  2438. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2439. }
  2440. block[j] = level;
  2441. }
  2442. }
  2443. }
  2444. static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
  2445. DCTELEM *block, int n, int qscale)
  2446. {
  2447. int i, level, nCoeffs;
  2448. const uint16_t *quant_matrix;
  2449. int sum=-1;
  2450. if(s->alternate_scan) nCoeffs= 63;
  2451. else nCoeffs= s->block_last_index[n];
  2452. if (n < 4)
  2453. block[0] = block[0] * s->y_dc_scale;
  2454. else
  2455. block[0] = block[0] * s->c_dc_scale;
  2456. quant_matrix = s->intra_matrix;
  2457. for(i=1;i<=nCoeffs;i++) {
  2458. int j= s->intra_scantable.permutated[i];
  2459. level = block[j];
  2460. if (level) {
  2461. if (level < 0) {
  2462. level = -level;
  2463. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2464. level = -level;
  2465. } else {
  2466. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2467. }
  2468. block[j] = level;
  2469. sum+=level;
  2470. }
  2471. }
  2472. block[63]^=sum&1;
  2473. }
  2474. static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
  2475. DCTELEM *block, int n, int qscale)
  2476. {
  2477. int i, level, nCoeffs;
  2478. const uint16_t *quant_matrix;
  2479. int sum=-1;
  2480. if(s->alternate_scan) nCoeffs= 63;
  2481. else nCoeffs= s->block_last_index[n];
  2482. quant_matrix = s->inter_matrix;
  2483. for(i=0; i<=nCoeffs; i++) {
  2484. int j= s->intra_scantable.permutated[i];
  2485. level = block[j];
  2486. if (level) {
  2487. if (level < 0) {
  2488. level = -level;
  2489. level = (((level << 1) + 1) * qscale *
  2490. ((int) (quant_matrix[j]))) >> 4;
  2491. level = -level;
  2492. } else {
  2493. level = (((level << 1) + 1) * qscale *
  2494. ((int) (quant_matrix[j]))) >> 4;
  2495. }
  2496. block[j] = level;
  2497. sum+=level;
  2498. }
  2499. }
  2500. block[63]^=sum&1;
  2501. }
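/* The H.263-style dequantizers below use the linear rule
 *   level' = level * (2 * qscale) +/- ((qscale - 1) | 1)
 * where the correction follows the sign of level and qadd = (qscale - 1) | 1
 * is forced odd (and is 0 for intra when advanced intra coding, h263_aic,
 * is in use). Worked example (illustrative): qscale = 5 gives qmul = 10 and
 * qadd = 5, so level 3 becomes 35 and level -3 becomes -35. */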
  2502. static void dct_unquantize_h263_intra_c(MpegEncContext *s,
  2503. DCTELEM *block, int n, int qscale)
  2504. {
  2505. int i, level, qmul, qadd;
  2506. int nCoeffs;
  2507. assert(s->block_last_index[n]>=0);
  2508. qmul = qscale << 1;
  2509. if (!s->h263_aic) {
  2510. if (n < 4)
  2511. block[0] = block[0] * s->y_dc_scale;
  2512. else
  2513. block[0] = block[0] * s->c_dc_scale;
  2514. qadd = (qscale - 1) | 1;
  2515. }else{
  2516. qadd = 0;
  2517. }
  2518. if(s->ac_pred)
  2519. nCoeffs=63;
  2520. else
  2521. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  2522. for(i=1; i<=nCoeffs; i++) {
  2523. level = block[i];
  2524. if (level) {
  2525. if (level < 0) {
  2526. level = level * qmul - qadd;
  2527. } else {
  2528. level = level * qmul + qadd;
  2529. }
  2530. block[i] = level;
  2531. }
  2532. }
  2533. }
  2534. static void dct_unquantize_h263_inter_c(MpegEncContext *s,
  2535. DCTELEM *block, int n, int qscale)
  2536. {
  2537. int i, level, qmul, qadd;
  2538. int nCoeffs;
  2539. assert(s->block_last_index[n]>=0);
  2540. qadd = (qscale - 1) | 1;
  2541. qmul = qscale << 1;
  2542. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  2543. for(i=0; i<=nCoeffs; i++) {
  2544. level = block[i];
  2545. if (level) {
  2546. if (level < 0) {
  2547. level = level * qmul - qadd;
  2548. } else {
  2549. level = level * qmul + qadd;
  2550. }
  2551. block[i] = level;
  2552. }
  2553. }
  2554. }
  2555. /**
  2556. * set qscale and update qscale dependent variables.
  2557. */
  2558. void ff_set_qscale(MpegEncContext * s, int qscale)
  2559. {
  2560. if (qscale < 1)
  2561. qscale = 1;
  2562. else if (qscale > 31)
  2563. qscale = 31;
  2564. s->qscale = qscale;
  2565. s->chroma_qscale= s->chroma_qscale_table[qscale];
  2566. s->y_dc_scale= s->y_dc_scale_table[ qscale ];
  2567. s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
  2568. }
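/* Note on ff_set_qscale() above (informal): the codecs served by this path
 * carry a 5-bit quantizer, so out-of-range requests are clipped, e.g. 0 is
 * bumped to 1 and 40 is clipped to 31, before the chroma-qscale and DC-scale
 * tables are consulted. */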
  2569. void ff_MPV_report_decode_progress(MpegEncContext *s)
  2570. {
  2571. if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
  2572. ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
  2573. }