You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2896 lines
110KB

  1. /*
  2. * The simplest mpeg encoder (well, it was the simplest!)
  3. * Copyright (c) 2000,2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of FFmpeg.
  9. *
  10. * FFmpeg is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * FFmpeg is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with FFmpeg; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * The simplest mpeg encoder (well, it was the simplest!).
  27. */
  28. #include "libavutil/intmath.h"
  29. #include "libavutil/imgutils.h"
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "internal.h"
  33. #include "mpegvideo.h"
  34. #include "mjpegenc.h"
  35. #include "msmpeg4.h"
  36. #include "xvmc_internal.h"
  37. #include "thread.h"
  38. #include <limits.h>
  39. //#undef NDEBUG
  40. //#include <assert.h>
  41. static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
  42. DCTELEM *block, int n, int qscale);
  43. static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
  44. DCTELEM *block, int n, int qscale);
  45. static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
  46. DCTELEM *block, int n, int qscale);
  47. static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
  48. DCTELEM *block, int n, int qscale);
  49. static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
  50. DCTELEM *block, int n, int qscale);
  51. static void dct_unquantize_h263_intra_c(MpegEncContext *s,
  52. DCTELEM *block, int n, int qscale);
  53. static void dct_unquantize_h263_inter_c(MpegEncContext *s,
  54. DCTELEM *block, int n, int qscale);
  55. /* enable all paranoid tests for rounding, overflows, etc... */
  56. //#define PARANOID
  57. //#define DEBUG
  58. static const uint8_t ff_default_chroma_qscale_table[32] = {
  59. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  60. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
  61. 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
  62. };
  63. const uint8_t ff_mpeg1_dc_scale_table[128] = {
  64. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  65. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  66. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  67. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  68. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  69. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  70. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  71. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  72. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  73. };
  74. static const uint8_t mpeg2_dc_scale_table1[128] = {
  75. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  76. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  77. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  78. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  79. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  80. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  81. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  82. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  83. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  84. };
  85. static const uint8_t mpeg2_dc_scale_table2[128] = {
  86. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  87. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  88. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  89. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  90. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  91. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  92. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  93. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  94. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  95. };
  96. static const uint8_t mpeg2_dc_scale_table3[128] = {
  97. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  98. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  99. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  100. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  101. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  102. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  103. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  104. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  105. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  106. };
  107. const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
  108. ff_mpeg1_dc_scale_table,
  109. mpeg2_dc_scale_table1,
  110. mpeg2_dc_scale_table2,
  111. mpeg2_dc_scale_table3,
  112. };
  113. const enum PixelFormat ff_pixfmt_list_420[] = {
  114. PIX_FMT_YUV420P,
  115. PIX_FMT_NONE
  116. };
  117. const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
  118. PIX_FMT_DXVA2_VLD,
  119. PIX_FMT_VAAPI_VLD,
  120. PIX_FMT_VDA_VLD,
  121. PIX_FMT_YUV420P,
  122. PIX_FMT_NONE
  123. };
  124. const uint8_t *avpriv_mpv_find_start_code(const uint8_t *av_restrict p,
  125. const uint8_t *end,
  126. uint32_t *av_restrict state)
  127. {
  128. int i;
  129. assert(p <= end);
  130. if (p >= end)
  131. return end;
  132. for (i = 0; i < 3; i++) {
  133. uint32_t tmp = *state << 8;
  134. *state = tmp + *(p++);
  135. if (tmp == 0x100 || p == end)
  136. return p;
  137. }
  138. while (p < end) {
  139. if (p[-1] > 1 ) p += 3;
  140. else if (p[-2] ) p += 2;
  141. else if (p[-3]|(p[-1]-1)) p++;
  142. else {
  143. p++;
  144. break;
  145. }
  146. }
  147. p = FFMIN(p, end) - 4;
  148. *state = AV_RB32(p);
  149. return p + 4;
  150. }
  151. /* init common dct for both encoder and decoder */
  152. av_cold int ff_dct_common_init(MpegEncContext *s)
  153. {
  154. ff_dsputil_init(&s->dsp, s->avctx);
  155. s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
  156. s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
  157. s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
  158. s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
  159. s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
  160. if (s->flags & CODEC_FLAG_BITEXACT)
  161. s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
  162. s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
  163. #if ARCH_X86
  164. ff_MPV_common_init_x86(s);
  165. #elif ARCH_ALPHA
  166. ff_MPV_common_init_axp(s);
  167. #elif HAVE_MMI
  168. ff_MPV_common_init_mmi(s);
  169. #elif ARCH_ARM
  170. ff_MPV_common_init_arm(s);
  171. #elif HAVE_ALTIVEC
  172. ff_MPV_common_init_altivec(s);
  173. #elif ARCH_BFIN
  174. ff_MPV_common_init_bfin(s);
  175. #endif
  176. /* load & permutate scantables
  177. * note: only wmv uses different ones
  178. */
  179. if (s->alternate_scan) {
  180. ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
  181. ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
  182. } else {
  183. ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
  184. ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
  185. }
  186. ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
  187. ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
  188. return 0;
  189. }
  190. void ff_copy_picture(Picture *dst, Picture *src)
  191. {
  192. *dst = *src;
  193. dst->f.type = FF_BUFFER_TYPE_COPY;
  194. }
  195. /**
  196. * Release a frame buffer
  197. */
  198. static void free_frame_buffer(MpegEncContext *s, Picture *pic)
  199. {
  200. /* WM Image / Screen codecs allocate internal buffers with different
  201. * dimensions / colorspaces; ignore user-defined callbacks for these. */
  202. if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
  203. s->codec_id != AV_CODEC_ID_VC1IMAGE &&
  204. s->codec_id != AV_CODEC_ID_MSS2)
  205. ff_thread_release_buffer(s->avctx, &pic->f);
  206. else
  207. avcodec_default_release_buffer(s->avctx, &pic->f);
  208. av_freep(&pic->f.hwaccel_picture_private);
  209. }
  210. /**
  211. * Allocate a frame buffer
  212. */
  213. static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
  214. {
  215. int r;
  216. if (s->avctx->hwaccel) {
  217. assert(!pic->f.hwaccel_picture_private);
  218. if (s->avctx->hwaccel->priv_data_size) {
  219. pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
  220. if (!pic->f.hwaccel_picture_private) {
  221. av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
  222. return -1;
  223. }
  224. }
  225. }
  226. if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
  227. s->codec_id != AV_CODEC_ID_VC1IMAGE &&
  228. s->codec_id != AV_CODEC_ID_MSS2)
  229. r = ff_thread_get_buffer(s->avctx, &pic->f);
  230. else
  231. r = avcodec_default_get_buffer(s->avctx, &pic->f);
  232. if (r < 0 || !pic->f.type || !pic->f.data[0]) {
  233. av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
  234. r, pic->f.type, pic->f.data[0]);
  235. av_freep(&pic->f.hwaccel_picture_private);
  236. return -1;
  237. }
  238. if (s->linesize && (s->linesize != pic->f.linesize[0] ||
  239. s->uvlinesize != pic->f.linesize[1])) {
  240. av_log(s->avctx, AV_LOG_ERROR,
  241. "get_buffer() failed (stride changed)\n");
  242. free_frame_buffer(s, pic);
  243. return -1;
  244. }
  245. if (pic->f.linesize[1] != pic->f.linesize[2]) {
  246. av_log(s->avctx, AV_LOG_ERROR,
  247. "get_buffer() failed (uv stride mismatch)\n");
  248. free_frame_buffer(s, pic);
  249. return -1;
  250. }
  251. return 0;
  252. }
  253. /**
  254. * Allocate a Picture.
  255. * The pixels are allocated/set by calling get_buffer() if shared = 0
  256. */
  257. int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
  258. {
  259. const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
  260. // the + 1 is needed so memset(,,stride*height) does not sig11
  261. const int mb_array_size = s->mb_stride * s->mb_height;
  262. const int b8_array_size = s->b8_stride * s->mb_height * 2;
  263. const int b4_array_size = s->b4_stride * s->mb_height * 4;
  264. int i;
  265. int r = -1;
  266. if (shared) {
  267. assert(pic->f.data[0]);
  268. assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
  269. pic->f.type = FF_BUFFER_TYPE_SHARED;
  270. } else {
  271. assert(!pic->f.data[0]);
  272. if (alloc_frame_buffer(s, pic) < 0)
  273. return -1;
  274. s->linesize = pic->f.linesize[0];
  275. s->uvlinesize = pic->f.linesize[1];
  276. }
  277. if (pic->f.qscale_table == NULL) {
  278. if (s->encoding) {
  279. FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
  280. mb_array_size * sizeof(int16_t), fail)
  281. FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
  282. mb_array_size * sizeof(int16_t), fail)
  283. FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
  284. mb_array_size * sizeof(int8_t ), fail)
  285. }
  286. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
  287. mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
  288. FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
  289. (big_mb_num + s->mb_stride) * sizeof(uint8_t),
  290. fail)
  291. FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
  292. (big_mb_num + s->mb_stride) * sizeof(uint32_t),
  293. fail)
  294. pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
  295. pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
  296. if (s->out_format == FMT_H264) {
  297. for (i = 0; i < 2; i++) {
  298. FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
  299. 2 * (b4_array_size + 4) * sizeof(int16_t),
  300. fail)
  301. pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
  302. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
  303. 4 * mb_array_size * sizeof(uint8_t), fail)
  304. }
  305. pic->f.motion_subsample_log2 = 2;
  306. } else if (s->out_format == FMT_H263 || s->encoding ||
  307. (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
  308. for (i = 0; i < 2; i++) {
  309. FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
  310. 2 * (b8_array_size + 4) * sizeof(int16_t),
  311. fail)
  312. pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
  313. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
  314. 4 * mb_array_size * sizeof(uint8_t), fail)
  315. }
  316. pic->f.motion_subsample_log2 = 3;
  317. }
  318. if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  319. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
  320. 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
  321. }
  322. pic->f.qstride = s->mb_stride;
  323. FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
  324. 1 * sizeof(AVPanScan), fail)
  325. }
  326. pic->owner2 = s;
  327. return 0;
  328. fail: // for the FF_ALLOCZ_OR_GOTO macro
  329. if (r >= 0)
  330. free_frame_buffer(s, pic);
  331. return -1;
  332. }
  333. /**
  334. * Deallocate a picture.
  335. */
  336. static void free_picture(MpegEncContext *s, Picture *pic)
  337. {
  338. int i;
  339. if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
  340. free_frame_buffer(s, pic);
  341. }
  342. av_freep(&pic->mb_var);
  343. av_freep(&pic->mc_mb_var);
  344. av_freep(&pic->mb_mean);
  345. av_freep(&pic->f.mbskip_table);
  346. av_freep(&pic->qscale_table_base);
  347. pic->f.qscale_table = NULL;
  348. av_freep(&pic->mb_type_base);
  349. pic->f.mb_type = NULL;
  350. av_freep(&pic->f.dct_coeff);
  351. av_freep(&pic->f.pan_scan);
  352. pic->f.mb_type = NULL;
  353. for (i = 0; i < 2; i++) {
  354. av_freep(&pic->motion_val_base[i]);
  355. av_freep(&pic->f.ref_index[i]);
  356. pic->f.motion_val[i] = NULL;
  357. }
  358. if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
  359. for (i = 0; i < 4; i++) {
  360. pic->f.base[i] =
  361. pic->f.data[i] = NULL;
  362. }
  363. pic->f.type = 0;
  364. }
  365. }
  366. static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
  367. {
  368. int y_size = s->b8_stride * (2 * s->mb_height + 1);
  369. int c_size = s->mb_stride * (s->mb_height + 1);
  370. int yc_size = y_size + 2 * c_size;
  371. int i;
  372. // edge emu needs blocksize + filter length - 1
  373. // (= 17x17 for halfpel / 21x21 for h264)
  374. FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
  375. (s->width + 95) * 2 * 21 * 4, fail); // (width + edge + align)*interlaced*MBsize*tolerance
  376. // FIXME should be linesize instead of s->width * 2
  377. // but that is not known before get_buffer()
  378. FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
  379. (s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
  380. s->me.temp = s->me.scratchpad;
  381. s->rd_scratchpad = s->me.scratchpad;
  382. s->b_scratchpad = s->me.scratchpad;
  383. s->obmc_scratchpad = s->me.scratchpad + 16;
  384. if (s->encoding) {
  385. FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
  386. ME_MAP_SIZE * sizeof(uint32_t), fail)
  387. FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
  388. ME_MAP_SIZE * sizeof(uint32_t), fail)
  389. if (s->avctx->noise_reduction) {
  390. FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
  391. 2 * 64 * sizeof(int), fail)
  392. }
  393. }
  394. FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
  395. s->block = s->blocks[0];
  396. for (i = 0; i < 12; i++) {
  397. s->pblocks[i] = &s->block[i];
  398. }
  399. if (s->out_format == FMT_H263) {
  400. /* ac values */
  401. FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
  402. yc_size * sizeof(int16_t) * 16, fail);
  403. s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
  404. s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
  405. s->ac_val[2] = s->ac_val[1] + c_size;
  406. }
  407. return 0;
  408. fail:
  409. return -1; // free() through ff_MPV_common_end()
  410. }
  411. static void free_duplicate_context(MpegEncContext *s)
  412. {
  413. if (s == NULL)
  414. return;
  415. av_freep(&s->edge_emu_buffer);
  416. av_freep(&s->me.scratchpad);
  417. s->me.temp =
  418. s->rd_scratchpad =
  419. s->b_scratchpad =
  420. s->obmc_scratchpad = NULL;
  421. av_freep(&s->dct_error_sum);
  422. av_freep(&s->me.map);
  423. av_freep(&s->me.score_map);
  424. av_freep(&s->blocks);
  425. av_freep(&s->ac_val_base);
  426. s->block = NULL;
  427. }
  428. static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
  429. {
  430. #define COPY(a) bak->a = src->a
  431. COPY(edge_emu_buffer);
  432. COPY(me.scratchpad);
  433. COPY(me.temp);
  434. COPY(rd_scratchpad);
  435. COPY(b_scratchpad);
  436. COPY(obmc_scratchpad);
  437. COPY(me.map);
  438. COPY(me.score_map);
  439. COPY(blocks);
  440. COPY(block);
  441. COPY(start_mb_y);
  442. COPY(end_mb_y);
  443. COPY(me.map_generation);
  444. COPY(pb);
  445. COPY(dct_error_sum);
  446. COPY(dct_count[0]);
  447. COPY(dct_count[1]);
  448. COPY(ac_val_base);
  449. COPY(ac_val[0]);
  450. COPY(ac_val[1]);
  451. COPY(ac_val[2]);
  452. #undef COPY
  453. }
  454. void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
  455. {
  456. MpegEncContext bak;
  457. int i;
  458. // FIXME copy only needed parts
  459. // START_TIMER
  460. backup_duplicate_context(&bak, dst);
  461. memcpy(dst, src, sizeof(MpegEncContext));
  462. backup_duplicate_context(dst, &bak);
  463. for (i = 0; i < 12; i++) {
  464. dst->pblocks[i] = &dst->block[i];
  465. }
  466. // STOP_TIMER("update_duplicate_context")
  467. // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
  468. }
  469. int ff_mpeg_update_thread_context(AVCodecContext *dst,
  470. const AVCodecContext *src)
  471. {
  472. MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
  473. if (dst == src)
  474. return 0;
  475. // FIXME can parameters change on I-frames?
  476. // in that case dst may need a reinit
  477. if (!s->context_initialized) {
  478. memcpy(s, s1, sizeof(MpegEncContext));
  479. s->avctx = dst;
  480. s->bitstream_buffer = NULL;
  481. s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
  482. if (s1->context_initialized){
  483. s->picture_range_start += MAX_PICTURE_COUNT;
  484. s->picture_range_end += MAX_PICTURE_COUNT;
  485. ff_MPV_common_init(s);
  486. }
  487. }
  488. s->avctx->coded_height = s1->avctx->coded_height;
  489. s->avctx->coded_width = s1->avctx->coded_width;
  490. s->avctx->width = s1->avctx->width;
  491. s->avctx->height = s1->avctx->height;
  492. s->coded_picture_number = s1->coded_picture_number;
  493. s->picture_number = s1->picture_number;
  494. s->input_picture_number = s1->input_picture_number;
  495. memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
  496. memcpy(&s->last_picture, &s1->last_picture,
  497. (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
  498. s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
  499. s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
  500. s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
  501. // Error/bug resilience
  502. s->next_p_frame_damaged = s1->next_p_frame_damaged;
  503. s->workaround_bugs = s1->workaround_bugs;
  504. s->padding_bug_score = s1->padding_bug_score;
  505. // MPEG4 timing info
  506. memcpy(&s->time_increment_bits, &s1->time_increment_bits,
  507. (char *) &s1->shape - (char *) &s1->time_increment_bits);
  508. // B-frame info
  509. s->max_b_frames = s1->max_b_frames;
  510. s->low_delay = s1->low_delay;
  511. s->dropable = s1->dropable;
  512. // DivX handling (doesn't work)
  513. s->divx_packed = s1->divx_packed;
  514. if (s1->bitstream_buffer) {
  515. if (s1->bitstream_buffer_size +
  516. FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
  517. av_fast_malloc(&s->bitstream_buffer,
  518. &s->allocated_bitstream_buffer_size,
  519. s1->allocated_bitstream_buffer_size);
  520. s->bitstream_buffer_size = s1->bitstream_buffer_size;
  521. memcpy(s->bitstream_buffer, s1->bitstream_buffer,
  522. s1->bitstream_buffer_size);
  523. memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
  524. FF_INPUT_BUFFER_PADDING_SIZE);
  525. }
  526. // MPEG2/interlacing info
  527. memcpy(&s->progressive_sequence, &s1->progressive_sequence,
  528. (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
  529. if (!s1->first_field) {
  530. s->last_pict_type = s1->pict_type;
  531. if (s1->current_picture_ptr)
  532. s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
  533. if (s1->pict_type != AV_PICTURE_TYPE_B) {
  534. s->last_non_b_pict_type = s1->pict_type;
  535. }
  536. }
  537. return 0;
  538. }
  539. /**
  540. * Set the given MpegEncContext to common defaults
  541. * (same for encoding and decoding).
  542. * The changed fields will not depend upon the
  543. * prior state of the MpegEncContext.
  544. */
  545. void ff_MPV_common_defaults(MpegEncContext *s)
  546. {
  547. s->y_dc_scale_table =
  548. s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
  549. s->chroma_qscale_table = ff_default_chroma_qscale_table;
  550. s->progressive_frame = 1;
  551. s->progressive_sequence = 1;
  552. s->picture_structure = PICT_FRAME;
  553. s->coded_picture_number = 0;
  554. s->picture_number = 0;
  555. s->input_picture_number = 0;
  556. s->picture_in_gop_number = 0;
  557. s->f_code = 1;
  558. s->b_code = 1;
  559. s->picture_range_start = 0;
  560. s->picture_range_end = MAX_PICTURE_COUNT;
  561. s->slice_context_count = 1;
  562. }
  563. /**
  564. * Set the given MpegEncContext to defaults for decoding.
  565. * the changed fields will not depend upon
  566. * the prior state of the MpegEncContext.
  567. */
  568. void ff_MPV_decode_defaults(MpegEncContext *s)
  569. {
  570. ff_MPV_common_defaults(s);
  571. }
  572. /**
  573. * init common structure for both encoder and decoder.
  574. * this assumes that some variables like width/height are already set
  575. */
  576. av_cold int ff_MPV_common_init(MpegEncContext *s)
  577. {
  578. int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
  579. int nb_slices = (HAVE_THREADS &&
  580. s->avctx->active_thread_type & FF_THREAD_SLICE) ?
  581. s->avctx->thread_count : 1;
  582. if (s->encoding && s->avctx->slices)
  583. nb_slices = s->avctx->slices;
  584. if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
  585. s->mb_height = (s->height + 31) / 32 * 2;
  586. else if (s->codec_id != AV_CODEC_ID_H264)
  587. s->mb_height = (s->height + 15) / 16;
  588. if (s->avctx->pix_fmt == PIX_FMT_NONE) {
  589. av_log(s->avctx, AV_LOG_ERROR,
  590. "decoding to PIX_FMT_NONE is not supported.\n");
  591. return -1;
  592. }
  593. if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
  594. int max_slices;
  595. if (s->mb_height)
  596. max_slices = FFMIN(MAX_THREADS, s->mb_height);
  597. else
  598. max_slices = MAX_THREADS;
  599. av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
  600. " reducing to %d\n", nb_slices, max_slices);
  601. nb_slices = max_slices;
  602. }
  603. if ((s->width || s->height) &&
  604. av_image_check_size(s->width, s->height, 0, s->avctx))
  605. return -1;
  606. ff_dct_common_init(s);
  607. s->flags = s->avctx->flags;
  608. s->flags2 = s->avctx->flags2;
  609. s->mb_width = (s->width + 15) / 16;
  610. s->mb_stride = s->mb_width + 1;
  611. s->b8_stride = s->mb_width * 2 + 1;
  612. s->b4_stride = s->mb_width * 4 + 1;
  613. mb_array_size = s->mb_height * s->mb_stride;
  614. mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
  615. /* set chroma shifts */
  616. avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
  617. &s->chroma_y_shift);
  618. /* set default edge pos, will be overridden in decode_header if needed */
  619. s->h_edge_pos = s->mb_width * 16;
  620. s->v_edge_pos = s->mb_height * 16;
  621. s->mb_num = s->mb_width * s->mb_height;
  622. s->block_wrap[0] =
  623. s->block_wrap[1] =
  624. s->block_wrap[2] =
  625. s->block_wrap[3] = s->b8_stride;
  626. s->block_wrap[4] =
  627. s->block_wrap[5] = s->mb_stride;
  628. y_size = s->b8_stride * (2 * s->mb_height + 1);
  629. c_size = s->mb_stride * (s->mb_height + 1);
  630. yc_size = y_size + 2 * c_size;
  631. /* convert fourcc to upper case */
  632. s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
  633. s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
  634. s->avctx->coded_frame = &s->current_picture.f;
  635. FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
  636. for (y = 0; y < s->mb_height; y++)
  637. for (x = 0; x < s->mb_width; x++)
  638. s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
  639. s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
  640. if (s->encoding) {
  641. /* Allocate MV tables */
  642. FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
  643. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
  644. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
  645. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
  646. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
  647. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
  648. s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
  649. s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
  650. s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
  651. s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
  652. s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
  653. s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
  654. if(s->msmpeg4_version){
  655. FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
  656. }
  657. FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
  658. /* Allocate MB type table */
  659. FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
  660. FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
  661. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
  662. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix , 64*32 * sizeof(int), fail)
  663. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
  664. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16 , 64*32*2 * sizeof(uint16_t), fail)
  665. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
  666. FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16 , 64*32*2 * sizeof(uint16_t), fail)
  667. FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
  668. FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
  669. if(s->avctx->noise_reduction){
  670. FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
  671. }
  672. FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
  673. mb_array_size * sizeof(float), fail);
  674. FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
  675. mb_array_size * sizeof(float), fail);
  676. }
  677. s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
  678. FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
  679. s->picture_count * sizeof(Picture), fail);
  680. for (i = 0; i < s->picture_count; i++) {
  681. avcodec_get_frame_defaults(&s->picture[i].f);
  682. }
  683. FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
  684. mb_array_size * sizeof(uint8_t), fail);
  685. FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
  686. mb_array_size * sizeof(uint8_t), fail);
  687. if(s->codec_id==AV_CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
  688. /* interlaced direct mode decoding tables */
  689. for (i = 0; i < 2; i++) {
  690. int j, k;
  691. for (j = 0; j < 2; j++) {
  692. for (k = 0; k < 2; k++) {
  693. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
  694. s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
  695. }
  696. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
  697. FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
  698. s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
  699. }
  700. FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
  701. }
  702. }
  703. if (s->out_format == FMT_H263) {
  704. /* cbp values */
  705. FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
  706. s->coded_block = s->coded_block_base + s->b8_stride + 1;
  707. /* cbp, ac_pred, pred_dir */
  708. FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
  709. FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
  710. }
  711. if (s->h263_pred || s->h263_plus || !s->encoding) {
  712. /* dc values */
  713. // MN: we need these for error resilience of intra-frames
  714. FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
  715. s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
  716. s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
  717. s->dc_val[2] = s->dc_val[1] + c_size;
  718. for (i = 0; i < yc_size; i++)
  719. s->dc_val_base[i] = 1024;
  720. }
  721. /* which mb is a intra block */
  722. FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
  723. memset(s->mbintra_table, 1, mb_array_size);
  724. /* init macroblock skip table */
  725. FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
  726. // Note the + 1 is for a quicker mpeg4 slice_end detection
  727. s->parse_context.state = -1;
  728. s->context_initialized = 1;
  729. s->thread_context[0] = s;
  730. // if (s->width && s->height) {
  731. if (nb_slices > 1) {
  732. for (i = 1; i < nb_slices; i++) {
  733. s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
  734. memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
  735. }
  736. for (i = 0; i < nb_slices; i++) {
  737. if (init_duplicate_context(s->thread_context[i], s) < 0)
  738. goto fail;
  739. s->thread_context[i]->start_mb_y =
  740. (s->mb_height * (i) + nb_slices / 2) / nb_slices;
  741. s->thread_context[i]->end_mb_y =
  742. (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
  743. }
  744. } else {
  745. if (init_duplicate_context(s, s) < 0)
  746. goto fail;
  747. s->start_mb_y = 0;
  748. s->end_mb_y = s->mb_height;
  749. }
  750. s->slice_context_count = nb_slices;
  751. // }
  752. return 0;
  753. fail:
  754. ff_MPV_common_end(s);
  755. return -1;
  756. }
  757. /* init common structure for both encoder and decoder */
  758. void ff_MPV_common_end(MpegEncContext *s)
  759. {
  760. int i, j, k;
  761. if (s->slice_context_count > 1) {
  762. for (i = 0; i < s->slice_context_count; i++) {
  763. free_duplicate_context(s->thread_context[i]);
  764. }
  765. for (i = 1; i < s->slice_context_count; i++) {
  766. av_freep(&s->thread_context[i]);
  767. }
  768. s->slice_context_count = 1;
  769. } else free_duplicate_context(s);
  770. av_freep(&s->parse_context.buffer);
  771. s->parse_context.buffer_size = 0;
  772. av_freep(&s->mb_type);
  773. av_freep(&s->p_mv_table_base);
  774. av_freep(&s->b_forw_mv_table_base);
  775. av_freep(&s->b_back_mv_table_base);
  776. av_freep(&s->b_bidir_forw_mv_table_base);
  777. av_freep(&s->b_bidir_back_mv_table_base);
  778. av_freep(&s->b_direct_mv_table_base);
  779. s->p_mv_table = NULL;
  780. s->b_forw_mv_table = NULL;
  781. s->b_back_mv_table = NULL;
  782. s->b_bidir_forw_mv_table = NULL;
  783. s->b_bidir_back_mv_table = NULL;
  784. s->b_direct_mv_table = NULL;
  785. for (i = 0; i < 2; i++) {
  786. for (j = 0; j < 2; j++) {
  787. for (k = 0; k < 2; k++) {
  788. av_freep(&s->b_field_mv_table_base[i][j][k]);
  789. s->b_field_mv_table[i][j][k] = NULL;
  790. }
  791. av_freep(&s->b_field_select_table[i][j]);
  792. av_freep(&s->p_field_mv_table_base[i][j]);
  793. s->p_field_mv_table[i][j] = NULL;
  794. }
  795. av_freep(&s->p_field_select_table[i]);
  796. }
  797. av_freep(&s->dc_val_base);
  798. av_freep(&s->coded_block_base);
  799. av_freep(&s->mbintra_table);
  800. av_freep(&s->cbp_table);
  801. av_freep(&s->pred_dir_table);
  802. av_freep(&s->mbskip_table);
  803. av_freep(&s->bitstream_buffer);
  804. s->allocated_bitstream_buffer_size = 0;
  805. av_freep(&s->avctx->stats_out);
  806. av_freep(&s->ac_stats);
  807. av_freep(&s->error_status_table);
  808. av_freep(&s->er_temp_buffer);
  809. av_freep(&s->mb_index2xy);
  810. av_freep(&s->lambda_table);
  811. if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
  812. if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
  813. s->q_chroma_intra_matrix= NULL;
  814. s->q_chroma_intra_matrix16= NULL;
  815. av_freep(&s->q_intra_matrix);
  816. av_freep(&s->q_inter_matrix);
  817. av_freep(&s->q_intra_matrix16);
  818. av_freep(&s->q_inter_matrix16);
  819. av_freep(&s->input_picture);
  820. av_freep(&s->reordered_input_picture);
  821. av_freep(&s->dct_offset);
  822. av_freep(&s->cplx_tab);
  823. av_freep(&s->bits_tab);
  824. if (s->picture && !s->avctx->internal->is_copy) {
  825. for (i = 0; i < s->picture_count; i++) {
  826. free_picture(s, &s->picture[i]);
  827. }
  828. }
  829. av_freep(&s->picture);
  830. s->context_initialized = 0;
  831. s->last_picture_ptr =
  832. s->next_picture_ptr =
  833. s->current_picture_ptr = NULL;
  834. s->linesize = s->uvlinesize = 0;
  835. for (i = 0; i < 3; i++)
  836. av_freep(&s->visualization_buffer[i]);
  837. if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
  838. avcodec_default_free_buffers(s->avctx);
  839. }
  840. void ff_init_rl(RLTable *rl,
  841. uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
  842. {
  843. int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
  844. uint8_t index_run[MAX_RUN + 1];
  845. int last, run, level, start, end, i;
  846. /* If table is static, we can quit if rl->max_level[0] is not NULL */
  847. if (static_store && rl->max_level[0])
  848. return;
  849. /* compute max_level[], max_run[] and index_run[] */
  850. for (last = 0; last < 2; last++) {
  851. if (last == 0) {
  852. start = 0;
  853. end = rl->last;
  854. } else {
  855. start = rl->last;
  856. end = rl->n;
  857. }
  858. memset(max_level, 0, MAX_RUN + 1);
  859. memset(max_run, 0, MAX_LEVEL + 1);
  860. memset(index_run, rl->n, MAX_RUN + 1);
  861. for (i = start; i < end; i++) {
  862. run = rl->table_run[i];
  863. level = rl->table_level[i];
  864. if (index_run[run] == rl->n)
  865. index_run[run] = i;
  866. if (level > max_level[run])
  867. max_level[run] = level;
  868. if (run > max_run[level])
  869. max_run[level] = run;
  870. }
  871. if (static_store)
  872. rl->max_level[last] = static_store[last];
  873. else
  874. rl->max_level[last] = av_malloc(MAX_RUN + 1);
  875. memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
  876. if (static_store)
  877. rl->max_run[last] = static_store[last] + MAX_RUN + 1;
  878. else
  879. rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
  880. memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
  881. if (static_store)
  882. rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
  883. else
  884. rl->index_run[last] = av_malloc(MAX_RUN + 1);
  885. memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
  886. }
  887. }
  888. void ff_init_vlc_rl(RLTable *rl)
  889. {
  890. int i, q;
  891. for (q = 0; q < 32; q++) {
  892. int qmul = q * 2;
  893. int qadd = (q - 1) | 1;
  894. if (q == 0) {
  895. qmul = 1;
  896. qadd = 0;
  897. }
  898. for (i = 0; i < rl->vlc.table_size; i++) {
  899. int code = rl->vlc.table[i][0];
  900. int len = rl->vlc.table[i][1];
  901. int level, run;
  902. if (len == 0) { // illegal code
  903. run = 66;
  904. level = MAX_LEVEL;
  905. } else if (len < 0) { // more bits needed
  906. run = 0;
  907. level = code;
  908. } else {
  909. if (code == rl->n) { // esc
  910. run = 66;
  911. level = 0;
  912. } else {
  913. run = rl->table_run[code] + 1;
  914. level = rl->table_level[code] * qmul + qadd;
  915. if (code >= rl->last) run += 192;
  916. }
  917. }
  918. rl->rl_vlc[q][i].len = len;
  919. rl->rl_vlc[q][i].level = level;
  920. rl->rl_vlc[q][i].run = run;
  921. }
  922. }
  923. }
  924. void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
  925. {
  926. int i;
  927. /* release non reference frames */
  928. for (i = 0; i < s->picture_count; i++) {
  929. if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
  930. (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
  931. (remove_current || &s->picture[i] != s->current_picture_ptr)
  932. /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
  933. free_frame_buffer(s, &s->picture[i]);
  934. }
  935. }
  936. }
  937. int ff_find_unused_picture(MpegEncContext *s, int shared)
  938. {
  939. int i;
  940. if (shared) {
  941. for (i = s->picture_range_start; i < s->picture_range_end; i++) {
  942. if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
  943. return i;
  944. }
  945. } else {
  946. for (i = s->picture_range_start; i < s->picture_range_end; i++) {
  947. if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
  948. return i; // FIXME
  949. }
  950. for (i = s->picture_range_start; i < s->picture_range_end; i++) {
  951. if (s->picture[i].f.data[0] == NULL)
  952. return i;
  953. }
  954. }
  955. av_log(s->avctx, AV_LOG_FATAL,
  956. "Internal error, picture buffer overflow\n");
  957. /* We could return -1, but the codec would crash trying to draw into a
  958. * non-existing frame anyway. This is safer than waiting for a random crash.
  959. * Also the return of this is never useful, an encoder must only allocate
  960. * as much as allowed in the specification. This has no relationship to how
  961. * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
  962. * enough for such valid streams).
  963. * Plus, a decoder has to check stream validity and remove frames if too
  964. * many reference frames are around. Waiting for "OOM" is not correct at
  965. * all. Similarly, missing reference frames have to be replaced by
  966. * interpolated/MC frames, anything else is a bug in the codec ...
  967. */
  968. abort();
  969. return -1;
  970. }
  971. static void update_noise_reduction(MpegEncContext *s)
  972. {
  973. int intra, i;
  974. for (intra = 0; intra < 2; intra++) {
  975. if (s->dct_count[intra] > (1 << 16)) {
  976. for (i = 0; i < 64; i++) {
  977. s->dct_error_sum[intra][i] >>= 1;
  978. }
  979. s->dct_count[intra] >>= 1;
  980. }
  981. for (i = 0; i < 64; i++) {
  982. s->dct_offset[intra][i] = (s->avctx->noise_reduction *
  983. s->dct_count[intra] +
  984. s->dct_error_sum[intra][i] / 2) /
  985. (s->dct_error_sum[intra][i] + 1);
  986. }
  987. }
  988. }
  989. /**
  990. * generic function for encode/decode called after coding/decoding
  991. * the header and before a frame is coded/decoded.
  992. */
  993. int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
  994. {
  995. int i;
  996. Picture *pic;
  997. s->mb_skipped = 0;
  998. if (!ff_thread_can_start_frame(avctx)) {
  999. av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
  1000. return -1;
  1001. }
  1002. /* mark & release old frames */
  1003. if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
  1004. if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
  1005. s->last_picture_ptr != s->next_picture_ptr &&
  1006. s->last_picture_ptr->f.data[0]) {
  1007. if (s->last_picture_ptr->owner2 == s)
  1008. free_frame_buffer(s, s->last_picture_ptr);
  1009. }
  1010. /* release forgotten pictures */
  1011. /* if (mpeg124/h263) */
  1012. if (!s->encoding) {
  1013. for (i = 0; i < s->picture_count; i++) {
  1014. if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
  1015. &s->picture[i] != s->last_picture_ptr &&
  1016. &s->picture[i] != s->next_picture_ptr &&
  1017. s->picture[i].f.reference) {
  1018. if (!(avctx->active_thread_type & FF_THREAD_FRAME))
  1019. av_log(avctx, AV_LOG_ERROR,
  1020. "releasing zombie picture\n");
  1021. free_frame_buffer(s, &s->picture[i]);
  1022. }
  1023. }
  1024. }
  1025. }
  1026. if (!s->encoding) {
  1027. ff_release_unused_pictures(s, 1);
  1028. if (s->current_picture_ptr &&
  1029. s->current_picture_ptr->f.data[0] == NULL) {
  1030. // we already have a unused image
  1031. // (maybe it was set before reading the header)
  1032. pic = s->current_picture_ptr;
  1033. } else {
  1034. i = ff_find_unused_picture(s, 0);
  1035. if (i < 0)
  1036. return i;
  1037. pic = &s->picture[i];
  1038. }
  1039. pic->f.reference = 0;
  1040. if (!s->dropable) {
  1041. if (s->codec_id == AV_CODEC_ID_H264)
  1042. pic->f.reference = s->picture_structure;
  1043. else if (s->pict_type != AV_PICTURE_TYPE_B)
  1044. pic->f.reference = 3;
  1045. }
  1046. pic->f.coded_picture_number = s->coded_picture_number++;
  1047. if (ff_alloc_picture(s, pic, 0) < 0)
  1048. return -1;
  1049. s->current_picture_ptr = pic;
  1050. // FIXME use only the vars from current_pic
  1051. s->current_picture_ptr->f.top_field_first = s->top_field_first;
  1052. if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
  1053. s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
  1054. if (s->picture_structure != PICT_FRAME)
  1055. s->current_picture_ptr->f.top_field_first =
  1056. (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
  1057. }
  1058. s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
  1059. !s->progressive_sequence;
  1060. s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
  1061. }
  1062. s->current_picture_ptr->f.pict_type = s->pict_type;
  1063. // if (s->flags && CODEC_FLAG_QSCALE)
  1064. // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
  1065. s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
  1066. ff_copy_picture(&s->current_picture, s->current_picture_ptr);
  1067. if (s->pict_type != AV_PICTURE_TYPE_B) {
  1068. s->last_picture_ptr = s->next_picture_ptr;
  1069. if (!s->dropable)
  1070. s->next_picture_ptr = s->current_picture_ptr;
  1071. }
  1072. /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
  1073. s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
  1074. s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
  1075. s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
  1076. s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
  1077. s->pict_type, s->dropable); */
  1078. if (s->codec_id != AV_CODEC_ID_H264) {
  1079. if ((s->last_picture_ptr == NULL ||
  1080. s->last_picture_ptr->f.data[0] == NULL) &&
  1081. (s->pict_type != AV_PICTURE_TYPE_I ||
  1082. s->picture_structure != PICT_FRAME)) {
  1083. if (s->pict_type != AV_PICTURE_TYPE_I)
  1084. av_log(avctx, AV_LOG_ERROR,
  1085. "warning: first frame is no keyframe\n");
  1086. else if (s->picture_structure != PICT_FRAME)
  1087. av_log(avctx, AV_LOG_INFO,
  1088. "allocate dummy last picture for field based first keyframe\n");
  1089. /* Allocate a dummy frame */
  1090. i = ff_find_unused_picture(s, 0);
  1091. if (i < 0)
  1092. return i;
  1093. s->last_picture_ptr = &s->picture[i];
  1094. s->last_picture_ptr->f.key_frame = 0;
  1095. if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
  1096. s->last_picture_ptr = NULL;
  1097. return -1;
  1098. }
  1099. if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
  1100. for(i=0; i<avctx->height; i++)
  1101. memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
  1102. }
  1103. ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
  1104. ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
  1105. s->last_picture_ptr->f.reference = 3;
  1106. }
  1107. if ((s->next_picture_ptr == NULL ||
  1108. s->next_picture_ptr->f.data[0] == NULL) &&
  1109. s->pict_type == AV_PICTURE_TYPE_B) {
  1110. /* Allocate a dummy frame */
  1111. i = ff_find_unused_picture(s, 0);
  1112. if (i < 0)
  1113. return i;
  1114. s->next_picture_ptr = &s->picture[i];
  1115. s->next_picture_ptr->f.key_frame = 0;
  1116. if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
  1117. s->next_picture_ptr = NULL;
  1118. return -1;
  1119. }
  1120. ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
  1121. ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
  1122. s->next_picture_ptr->f.reference = 3;
  1123. }
  1124. }
  1125. if (s->last_picture_ptr)
  1126. ff_copy_picture(&s->last_picture, s->last_picture_ptr);
  1127. if (s->next_picture_ptr)
  1128. ff_copy_picture(&s->next_picture, s->next_picture_ptr);
  1129. if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
  1130. (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
  1131. if (s->next_picture_ptr)
  1132. s->next_picture_ptr->owner2 = s;
  1133. if (s->last_picture_ptr)
  1134. s->last_picture_ptr->owner2 = s;
  1135. }
  1136. assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
  1137. s->last_picture_ptr->f.data[0]));
  1138. if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
  1139. int i;
  1140. for (i = 0; i < 4; i++) {
  1141. if (s->picture_structure == PICT_BOTTOM_FIELD) {
  1142. s->current_picture.f.data[i] +=
  1143. s->current_picture.f.linesize[i];
  1144. }
  1145. s->current_picture.f.linesize[i] *= 2;
  1146. s->last_picture.f.linesize[i] *= 2;
  1147. s->next_picture.f.linesize[i] *= 2;
  1148. }
  1149. }
  1150. s->err_recognition = avctx->err_recognition;
  1151. /* set dequantizer, we can't do it during init as
  1152. * it might change for mpeg4 and we can't do it in the header
  1153. * decode as init is not called for mpeg4 there yet */
  1154. if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
  1155. s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
  1156. s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
  1157. } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
  1158. s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
  1159. s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
  1160. } else {
  1161. s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
  1162. s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
  1163. }
  1164. if (s->dct_error_sum) {
  1165. assert(s->avctx->noise_reduction && s->encoding);
  1166. update_noise_reduction(s);
  1167. }
  1168. if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
  1169. return ff_xvmc_field_start(s, avctx);
  1170. return 0;
  1171. }
  1172. /* generic function for encode/decode called after a
  1173. * frame has been coded/decoded. */
  1174. void ff_MPV_frame_end(MpegEncContext *s)
  1175. {
  1176. int i;
  1177. /* redraw edges for the frame if decoding didn't complete */
  1178. // just to make sure that all data is rendered.
  1179. if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
  1180. ff_xvmc_field_end(s);
  1181. } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
  1182. !s->avctx->hwaccel &&
  1183. !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
  1184. s->unrestricted_mv &&
  1185. s->current_picture.f.reference &&
  1186. !s->intra_only &&
  1187. !(s->flags & CODEC_FLAG_EMU_EDGE) &&
  1188. !s->avctx->lowres
  1189. ) {
  1190. int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
  1191. int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
  1192. s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
  1193. s->h_edge_pos, s->v_edge_pos,
  1194. EDGE_WIDTH, EDGE_WIDTH,
  1195. EDGE_TOP | EDGE_BOTTOM);
  1196. s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
  1197. s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
  1198. EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
  1199. EDGE_TOP | EDGE_BOTTOM);
  1200. s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
  1201. s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
  1202. EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
  1203. EDGE_TOP | EDGE_BOTTOM);
  1204. }
  1205. emms_c();
  1206. s->last_pict_type = s->pict_type;
  1207. s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
  1208. if (s->pict_type!= AV_PICTURE_TYPE_B) {
  1209. s->last_non_b_pict_type = s->pict_type;
  1210. }
  1211. #if 0
  1212. /* copy back current_picture variables */
  1213. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1214. if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
  1215. s->picture[i] = s->current_picture;
  1216. break;
  1217. }
  1218. }
  1219. assert(i < MAX_PICTURE_COUNT);
  1220. #endif
  1221. if (s->encoding) {
  1222. /* release non-reference frames */
  1223. for (i = 0; i < s->picture_count; i++) {
  1224. if (s->picture[i].f.data[0] && !s->picture[i].f.reference
  1225. /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
  1226. free_frame_buffer(s, &s->picture[i]);
  1227. }
  1228. }
  1229. }
  1230. // clear copies, to avoid confusion
  1231. #if 0
  1232. memset(&s->last_picture, 0, sizeof(Picture));
  1233. memset(&s->next_picture, 0, sizeof(Picture));
  1234. memset(&s->current_picture, 0, sizeof(Picture));
  1235. #endif
  1236. s->avctx->coded_frame = &s->current_picture_ptr->f;
  1237. if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
  1238. ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
  1239. }
  1240. }
  1241. /**
  1242. * Draw a line from (ex, ey) -> (sx, sy).
  1243. * @param w width of the image
  1244. * @param h height of the image
  1245. * @param stride stride/linesize of the image
  1246. * @param color color of the arrow
  1247. */
  1248. static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
  1249. int w, int h, int stride, int color)
  1250. {
  1251. int x, y, fr, f;
  1252. sx = av_clip(sx, 0, w - 1);
  1253. sy = av_clip(sy, 0, h - 1);
  1254. ex = av_clip(ex, 0, w - 1);
  1255. ey = av_clip(ey, 0, h - 1);
  1256. buf[sy * stride + sx] += color;
  1257. if (FFABS(ex - sx) > FFABS(ey - sy)) {
  1258. if (sx > ex) {
  1259. FFSWAP(int, sx, ex);
  1260. FFSWAP(int, sy, ey);
  1261. }
  1262. buf += sx + sy * stride;
  1263. ex -= sx;
  1264. f = ((ey - sy) << 16) / ex;
  1265. for(x= 0; x <= ex; x++){
  1266. y = (x * f) >> 16;
  1267. fr = (x * f) & 0xFFFF;
  1268. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1269. if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
  1270. }
  1271. } else {
  1272. if (sy > ey) {
  1273. FFSWAP(int, sx, ex);
  1274. FFSWAP(int, sy, ey);
  1275. }
  1276. buf += sx + sy * stride;
  1277. ey -= sy;
  1278. if (ey)
  1279. f = ((ex - sx) << 16) / ey;
  1280. else
  1281. f = 0;
  1282. for(y= 0; y <= ey; y++){
  1283. x = (y*f) >> 16;
  1284. fr = (y*f) & 0xFFFF;
  1285. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1286. if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
  1287. }
  1288. }
  1289. }
  1290. /**
  1291. * Draw an arrow from (ex, ey) -> (sx, sy).
  1292. * @param w width of the image
  1293. * @param h height of the image
  1294. * @param stride stride/linesize of the image
  1295. * @param color color of the arrow
  1296. */
  1297. static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
  1298. int ey, int w, int h, int stride, int color)
  1299. {
  1300. int dx,dy;
  1301. sx = av_clip(sx, -100, w + 100);
  1302. sy = av_clip(sy, -100, h + 100);
  1303. ex = av_clip(ex, -100, w + 100);
  1304. ey = av_clip(ey, -100, h + 100);
  1305. dx = ex - sx;
  1306. dy = ey - sy;
  1307. if (dx * dx + dy * dy > 3 * 3) {
  1308. int rx = dx + dy;
  1309. int ry = -dx + dy;
  1310. int length = ff_sqrt((rx * rx + ry * ry) << 8);
  1311. // FIXME subpixel accuracy
  1312. rx = ROUNDED_DIV(rx * 3 << 4, length);
  1313. ry = ROUNDED_DIV(ry * 3 << 4, length);
  1314. draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
  1315. draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
  1316. }
  1317. draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
  1318. }
  1319. /**
  1320. * Print debugging info for the given picture.
  1321. */
  1322. void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
  1323. {
  1324. if (s->avctx->hwaccel || !pict || !pict->mb_type)
  1325. return;
  1326. if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
  1327. int x,y;
  1328. av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
  1329. av_get_picture_type_char(pict->pict_type));
  1330. for (y = 0; y < s->mb_height; y++) {
  1331. for (x = 0; x < s->mb_width; x++) {
  1332. if (s->avctx->debug & FF_DEBUG_SKIP) {
  1333. int count = s->mbskip_table[x + y * s->mb_stride];
  1334. if (count > 9)
  1335. count = 9;
  1336. av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
  1337. }
  1338. if (s->avctx->debug & FF_DEBUG_QP) {
  1339. av_log(s->avctx, AV_LOG_DEBUG, "%2d",
  1340. pict->qscale_table[x + y * s->mb_stride]);
  1341. }
  1342. if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
  1343. int mb_type = pict->mb_type[x + y * s->mb_stride];
  1344. // Type & MV direction
  1345. if (IS_PCM(mb_type))
  1346. av_log(s->avctx, AV_LOG_DEBUG, "P");
  1347. else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
  1348. av_log(s->avctx, AV_LOG_DEBUG, "A");
  1349. else if (IS_INTRA4x4(mb_type))
  1350. av_log(s->avctx, AV_LOG_DEBUG, "i");
  1351. else if (IS_INTRA16x16(mb_type))
  1352. av_log(s->avctx, AV_LOG_DEBUG, "I");
  1353. else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
  1354. av_log(s->avctx, AV_LOG_DEBUG, "d");
  1355. else if (IS_DIRECT(mb_type))
  1356. av_log(s->avctx, AV_LOG_DEBUG, "D");
  1357. else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
  1358. av_log(s->avctx, AV_LOG_DEBUG, "g");
  1359. else if (IS_GMC(mb_type))
  1360. av_log(s->avctx, AV_LOG_DEBUG, "G");
  1361. else if (IS_SKIP(mb_type))
  1362. av_log(s->avctx, AV_LOG_DEBUG, "S");
  1363. else if (!USES_LIST(mb_type, 1))
  1364. av_log(s->avctx, AV_LOG_DEBUG, ">");
  1365. else if (!USES_LIST(mb_type, 0))
  1366. av_log(s->avctx, AV_LOG_DEBUG, "<");
  1367. else {
  1368. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1369. av_log(s->avctx, AV_LOG_DEBUG, "X");
  1370. }
  1371. // segmentation
  1372. if (IS_8X8(mb_type))
  1373. av_log(s->avctx, AV_LOG_DEBUG, "+");
  1374. else if (IS_16X8(mb_type))
  1375. av_log(s->avctx, AV_LOG_DEBUG, "-");
  1376. else if (IS_8X16(mb_type))
  1377. av_log(s->avctx, AV_LOG_DEBUG, "|");
  1378. else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
  1379. av_log(s->avctx, AV_LOG_DEBUG, " ");
  1380. else
  1381. av_log(s->avctx, AV_LOG_DEBUG, "?");
  1382. if (IS_INTERLACED(mb_type))
  1383. av_log(s->avctx, AV_LOG_DEBUG, "=");
  1384. else
  1385. av_log(s->avctx, AV_LOG_DEBUG, " ");
  1386. }
  1387. // av_log(s->avctx, AV_LOG_DEBUG, " ");
  1388. }
  1389. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  1390. }
  1391. }
  1392. if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
  1393. (s->avctx->debug_mv)) {
  1394. const int shift = 1 + s->quarter_sample;
  1395. int mb_y;
  1396. uint8_t *ptr;
  1397. int i;
  1398. int h_chroma_shift, v_chroma_shift, block_height;
  1399. const int width = s->avctx->width;
  1400. const int height = s->avctx->height;
  1401. const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
  1402. const int mv_stride = (s->mb_width << mv_sample_log2) +
  1403. (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
  1404. s->low_delay = 0; // needed to see the vectors without trashing the buffers
  1405. avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
  1406. &h_chroma_shift, &v_chroma_shift);
  1407. for (i = 0; i < 3; i++) {
  1408. size_t size= (i == 0) ? pict->linesize[i] * FFALIGN(height, 16):
  1409. pict->linesize[i] * FFALIGN(height, 16) >> v_chroma_shift;
  1410. s->visualization_buffer[i]= av_realloc(s->visualization_buffer[i], size);
  1411. memcpy(s->visualization_buffer[i], pict->data[i], size);
  1412. pict->data[i] = s->visualization_buffer[i];
  1413. }
  1414. pict->type = FF_BUFFER_TYPE_COPY;
  1415. pict->opaque= NULL;
  1416. ptr = pict->data[0];
  1417. block_height = 16 >> v_chroma_shift;
  1418. for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
  1419. int mb_x;
  1420. for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
  1421. const int mb_index = mb_x + mb_y * s->mb_stride;
  1422. if ((s->avctx->debug_mv) && pict->motion_val) {
  1423. int type;
  1424. for (type = 0; type < 3; type++) {
  1425. int direction = 0;
  1426. switch (type) {
  1427. case 0:
  1428. if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
  1429. (pict->pict_type!= AV_PICTURE_TYPE_P))
  1430. continue;
  1431. direction = 0;
  1432. break;
  1433. case 1:
  1434. if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
  1435. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1436. continue;
  1437. direction = 0;
  1438. break;
  1439. case 2:
  1440. if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
  1441. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1442. continue;
  1443. direction = 1;
  1444. break;
  1445. }
  1446. if (!USES_LIST(pict->mb_type[mb_index], direction))
  1447. continue;
  1448. if (IS_8X8(pict->mb_type[mb_index])) {
  1449. int i;
  1450. for (i = 0; i < 4; i++) {
  1451. int sx = mb_x * 16 + 4 + 8 * (i & 1);
  1452. int sy = mb_y * 16 + 4 + 8 * (i >> 1);
  1453. int xy = (mb_x * 2 + (i & 1) +
  1454. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1455. int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
  1456. int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
  1457. draw_arrow(ptr, sx, sy, mx, my, width,
  1458. height, s->linesize, 100);
  1459. }
  1460. } else if (IS_16X8(pict->mb_type[mb_index])) {
  1461. int i;
  1462. for (i = 0; i < 2; i++) {
  1463. int sx = mb_x * 16 + 8;
  1464. int sy = mb_y * 16 + 4 + 8 * i;
  1465. int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
  1466. int mx = (pict->motion_val[direction][xy][0] >> shift);
  1467. int my = (pict->motion_val[direction][xy][1] >> shift);
  1468. if (IS_INTERLACED(pict->mb_type[mb_index]))
  1469. my *= 2;
  1470. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1471. height, s->linesize, 100);
  1472. }
  1473. } else if (IS_8X16(pict->mb_type[mb_index])) {
  1474. int i;
  1475. for (i = 0; i < 2; i++) {
  1476. int sx = mb_x * 16 + 4 + 8 * i;
  1477. int sy = mb_y * 16 + 8;
  1478. int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
  1479. int mx = pict->motion_val[direction][xy][0] >> shift;
  1480. int my = pict->motion_val[direction][xy][1] >> shift;
  1481. if (IS_INTERLACED(pict->mb_type[mb_index]))
  1482. my *= 2;
  1483. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1484. height, s->linesize, 100);
  1485. }
  1486. } else {
  1487. int sx= mb_x * 16 + 8;
  1488. int sy= mb_y * 16 + 8;
  1489. int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
  1490. int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
  1491. int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
  1492. draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
  1493. }
  1494. }
  1495. }
  1496. if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
  1497. uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
  1498. 0x0101010101010101ULL;
  1499. int y;
  1500. for (y = 0; y < block_height; y++) {
  1501. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1502. (block_height * mb_y + y) *
  1503. pict->linesize[1]) = c;
  1504. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1505. (block_height * mb_y + y) *
  1506. pict->linesize[2]) = c;
  1507. }
  1508. }
  1509. if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
  1510. pict->motion_val) {
  1511. int mb_type = pict->mb_type[mb_index];
  1512. uint64_t u,v;
  1513. int y;
  1514. #define COLOR(theta, r) \
  1515. u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
  1516. v = (int)(128 + r * sin(theta * 3.141592 / 180));
  1517. u = v = 128;
  1518. if (IS_PCM(mb_type)) {
  1519. COLOR(120, 48)
  1520. } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
  1521. IS_INTRA16x16(mb_type)) {
  1522. COLOR(30, 48)
  1523. } else if (IS_INTRA4x4(mb_type)) {
  1524. COLOR(90, 48)
  1525. } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
  1526. // COLOR(120, 48)
  1527. } else if (IS_DIRECT(mb_type)) {
  1528. COLOR(150, 48)
  1529. } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
  1530. COLOR(170, 48)
  1531. } else if (IS_GMC(mb_type)) {
  1532. COLOR(190, 48)
  1533. } else if (IS_SKIP(mb_type)) {
  1534. // COLOR(180, 48)
  1535. } else if (!USES_LIST(mb_type, 1)) {
  1536. COLOR(240, 48)
  1537. } else if (!USES_LIST(mb_type, 0)) {
  1538. COLOR(0, 48)
  1539. } else {
  1540. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1541. COLOR(300,48)
  1542. }
  1543. u *= 0x0101010101010101ULL;
  1544. v *= 0x0101010101010101ULL;
  1545. for (y = 0; y < block_height; y++) {
  1546. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1547. (block_height * mb_y + y) * pict->linesize[1]) = u;
  1548. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1549. (block_height * mb_y + y) * pict->linesize[2]) = v;
  1550. }
  1551. // segmentation
  1552. if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
  1553. *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
  1554. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1555. *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
  1556. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1557. }
  1558. if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
  1559. for (y = 0; y < 16; y++)
  1560. pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
  1561. pict->linesize[0]] ^= 0x80;
  1562. }
  1563. if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
  1564. int dm = 1 << (mv_sample_log2 - 2);
  1565. for (i = 0; i < 4; i++) {
  1566. int sx = mb_x * 16 + 8 * (i & 1);
  1567. int sy = mb_y * 16 + 8 * (i >> 1);
  1568. int xy = (mb_x * 2 + (i & 1) +
  1569. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1570. // FIXME bidir
  1571. int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
  1572. if (mv[0] != mv[dm] ||
  1573. mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
  1574. for (y = 0; y < 8; y++)
  1575. pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
  1576. if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
  1577. *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
  1578. pict->linesize[0]) ^= 0x8080808080808080ULL;
  1579. }
  1580. }
  1581. if (IS_INTERLACED(mb_type) &&
  1582. s->codec_id == AV_CODEC_ID_H264) {
1583. // interlaced H.264 macroblocks: nothing extra is visualized here
  1584. }
  1585. }
  1586. s->mbskip_table[mb_index] = 0;
  1587. }
  1588. }
  1589. }
  1590. }
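/**
 * Half-pel motion compensation of a single plane at reduced (lowres)
 * resolution. Quarter-pel vectors are first halved; the sub-pel part that
 * survives the lowres shift is passed to the h264 chroma MC function as a
 * bilinear sub-position, and edge emulation is used when the block would
 * read outside the decoded picture edges.
 * @return 1 if the edge-emulation buffer was used, 0 otherwise
 */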
  1591. static inline int hpel_motion_lowres(MpegEncContext *s,
  1592. uint8_t *dest, uint8_t *src,
  1593. int field_based, int field_select,
  1594. int src_x, int src_y,
  1595. int width, int height, int stride,
  1596. int h_edge_pos, int v_edge_pos,
  1597. int w, int h, h264_chroma_mc_func *pix_op,
  1598. int motion_x, int motion_y)
  1599. {
  1600. const int lowres = s->avctx->lowres;
  1601. const int op_index = FFMIN(lowres, 2);
  1602. const int s_mask = (2 << lowres) - 1;
  1603. int emu = 0;
  1604. int sx, sy;
  1605. if (s->quarter_sample) {
  1606. motion_x /= 2;
  1607. motion_y /= 2;
  1608. }
  1609. sx = motion_x & s_mask;
  1610. sy = motion_y & s_mask;
  1611. src_x += motion_x >> lowres + 1;
  1612. src_y += motion_y >> lowres + 1;
  1613. src += src_y * stride + src_x;
  1614. if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
  1615. (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  1616. s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
  1617. (h + 1) << field_based, src_x,
  1618. src_y << field_based,
  1619. h_edge_pos,
  1620. v_edge_pos);
  1621. src = s->edge_emu_buffer;
  1622. emu = 1;
  1623. }
  1624. sx = (sx << 2) >> lowres;
  1625. sy = (sy << 2) >> lowres;
  1626. if (field_select)
  1627. src += s->linesize;
  1628. pix_op[op_index](dest, src, stride, h, sx, sy);
  1629. return emu;
  1630. }
  1631. /* apply one mpeg motion vector to the three components */
  1632. static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
  1633. uint8_t *dest_y,
  1634. uint8_t *dest_cb,
  1635. uint8_t *dest_cr,
  1636. int field_based,
  1637. int bottom_field,
  1638. int field_select,
  1639. uint8_t **ref_picture,
  1640. h264_chroma_mc_func *pix_op,
  1641. int motion_x, int motion_y,
  1642. int h, int mb_y)
  1643. {
  1644. uint8_t *ptr_y, *ptr_cb, *ptr_cr;
  1645. int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
  1646. uvsx, uvsy;
  1647. const int lowres = s->avctx->lowres;
  1648. const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
  1649. const int block_s = 8>>lowres;
  1650. const int s_mask = (2 << lowres) - 1;
  1651. const int h_edge_pos = s->h_edge_pos >> lowres;
  1652. const int v_edge_pos = s->v_edge_pos >> lowres;
  1653. linesize = s->current_picture.f.linesize[0] << field_based;
  1654. uvlinesize = s->current_picture.f.linesize[1] << field_based;
  1655. // FIXME obviously not perfect but qpel will not work in lowres anyway
  1656. if (s->quarter_sample) {
  1657. motion_x /= 2;
  1658. motion_y /= 2;
  1659. }
  1660. if(field_based){
  1661. motion_y += (bottom_field - field_select)*((1 << lowres)-1);
  1662. }
  1663. sx = motion_x & s_mask;
  1664. sy = motion_y & s_mask;
  1665. src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
  1666. src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
  1667. if (s->out_format == FMT_H263) {
  1668. uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
  1669. uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
  1670. uvsrc_x = src_x >> 1;
  1671. uvsrc_y = src_y >> 1;
  1672. } else if (s->out_format == FMT_H261) {
1673. // even chroma MVs are full-pel in H.261
  1674. mx = motion_x / 4;
  1675. my = motion_y / 4;
  1676. uvsx = (2 * mx) & s_mask;
  1677. uvsy = (2 * my) & s_mask;
  1678. uvsrc_x = s->mb_x * block_s + (mx >> lowres);
  1679. uvsrc_y = mb_y * block_s + (my >> lowres);
  1680. } else {
  1681. if(s->chroma_y_shift){
  1682. mx = motion_x / 2;
  1683. my = motion_y / 2;
  1684. uvsx = mx & s_mask;
  1685. uvsy = my & s_mask;
  1686. uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
  1687. uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
  1688. } else {
  1689. if(s->chroma_x_shift){
  1690. //Chroma422
  1691. mx = motion_x / 2;
  1692. uvsx = mx & s_mask;
  1693. uvsy = motion_y & s_mask;
  1694. uvsrc_y = src_y;
  1695. uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
  1696. } else {
  1697. //Chroma444
  1698. uvsx = motion_x & s_mask;
  1699. uvsy = motion_y & s_mask;
  1700. uvsrc_x = src_x;
  1701. uvsrc_y = src_y;
  1702. }
  1703. }
  1704. }
  1705. ptr_y = ref_picture[0] + src_y * linesize + src_x;
  1706. ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
  1707. ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
  1708. if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
  1709. (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  1710. s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
  1711. s->linesize, 17, 17 + field_based,
  1712. src_x, src_y << field_based, h_edge_pos,
  1713. v_edge_pos);
  1714. ptr_y = s->edge_emu_buffer;
  1715. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  1716. uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
  1717. s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
  1718. 9 + field_based,
  1719. uvsrc_x, uvsrc_y << field_based,
  1720. h_edge_pos >> 1, v_edge_pos >> 1);
  1721. s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
  1722. 9 + field_based,
  1723. uvsrc_x, uvsrc_y << field_based,
  1724. h_edge_pos >> 1, v_edge_pos >> 1);
  1725. ptr_cb = uvbuf;
  1726. ptr_cr = uvbuf + 16;
  1727. }
  1728. }
  1729. // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
  1730. if (bottom_field) {
  1731. dest_y += s->linesize;
  1732. dest_cb += s->uvlinesize;
  1733. dest_cr += s->uvlinesize;
  1734. }
  1735. if (field_select) {
  1736. ptr_y += s->linesize;
  1737. ptr_cb += s->uvlinesize;
  1738. ptr_cr += s->uvlinesize;
  1739. }
  1740. sx = (sx << 2) >> lowres;
  1741. sy = (sy << 2) >> lowres;
  1742. pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
  1743. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  1744. uvsx = (uvsx << 2) >> lowres;
  1745. uvsy = (uvsy << 2) >> lowres;
  1746. if (h >> s->chroma_y_shift) {
  1747. pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
  1748. pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
  1749. }
  1750. }
  1751. // FIXME h261 lowres loop filter
  1752. }
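/**
 * Chroma motion compensation for 4MV (8x8) macroblocks at lowres: the sum of
 * the four luma vectors is turned into a single chroma vector with
 * H.263-style rounding (ff_h263_round_chroma) and both chroma planes are
 * filled from the same source offset.
 */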
  1753. static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
  1754. uint8_t *dest_cb, uint8_t *dest_cr,
  1755. uint8_t **ref_picture,
  1756. h264_chroma_mc_func * pix_op,
  1757. int mx, int my)
  1758. {
  1759. const int lowres = s->avctx->lowres;
  1760. const int op_index = FFMIN(lowres, 2);
  1761. const int block_s = 8 >> lowres;
  1762. const int s_mask = (2 << lowres) - 1;
  1763. const int h_edge_pos = s->h_edge_pos >> lowres + 1;
  1764. const int v_edge_pos = s->v_edge_pos >> lowres + 1;
  1765. int emu = 0, src_x, src_y, offset, sx, sy;
  1766. uint8_t *ptr;
  1767. if (s->quarter_sample) {
  1768. mx /= 2;
  1769. my /= 2;
  1770. }
  1771. /* In case of 8X8, we construct a single chroma motion vector
  1772. with a special rounding */
  1773. mx = ff_h263_round_chroma(mx);
  1774. my = ff_h263_round_chroma(my);
  1775. sx = mx & s_mask;
  1776. sy = my & s_mask;
  1777. src_x = s->mb_x * block_s + (mx >> lowres + 1);
  1778. src_y = s->mb_y * block_s + (my >> lowres + 1);
  1779. offset = src_y * s->uvlinesize + src_x;
  1780. ptr = ref_picture[1] + offset;
  1781. if (s->flags & CODEC_FLAG_EMU_EDGE) {
  1782. if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
  1783. (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
  1784. s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
  1785. 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
  1786. ptr = s->edge_emu_buffer;
  1787. emu = 1;
  1788. }
  1789. }
  1790. sx = (sx << 2) >> lowres;
  1791. sy = (sy << 2) >> lowres;
  1792. pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
  1793. ptr = ref_picture[2] + offset;
  1794. if (emu) {
  1795. s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
  1796. src_x, src_y, h_edge_pos, v_edge_pos);
  1797. ptr = s->edge_emu_buffer;
  1798. }
  1799. pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
  1800. }
  1801. /**
  1802. * motion compensation of a single macroblock
  1803. * @param s context
  1804. * @param dest_y luma destination pointer
  1805. * @param dest_cb chroma cb/u destination pointer
  1806. * @param dest_cr chroma cr/v destination pointer
  1807. * @param dir direction (0->forward, 1->backward)
  1808. * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
  1809. * @param pix_op halfpel motion compensation function (average or put normally)
  1810. * the motion vectors are taken from s->mv and the MV type from s->mv_type
  1811. */
  1812. static inline void MPV_motion_lowres(MpegEncContext *s,
  1813. uint8_t *dest_y, uint8_t *dest_cb,
  1814. uint8_t *dest_cr,
  1815. int dir, uint8_t **ref_picture,
  1816. h264_chroma_mc_func *pix_op)
  1817. {
  1818. int mx, my;
  1819. int mb_x, mb_y, i;
  1820. const int lowres = s->avctx->lowres;
  1821. const int block_s = 8 >>lowres;
  1822. mb_x = s->mb_x;
  1823. mb_y = s->mb_y;
  1824. switch (s->mv_type) {
  1825. case MV_TYPE_16X16:
  1826. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1827. 0, 0, 0,
  1828. ref_picture, pix_op,
  1829. s->mv[dir][0][0], s->mv[dir][0][1],
  1830. 2 * block_s, mb_y);
  1831. break;
  1832. case MV_TYPE_8X8:
  1833. mx = 0;
  1834. my = 0;
  1835. for (i = 0; i < 4; i++) {
  1836. hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
  1837. s->linesize) * block_s,
  1838. ref_picture[0], 0, 0,
  1839. (2 * mb_x + (i & 1)) * block_s,
  1840. (2 * mb_y + (i >> 1)) * block_s,
  1841. s->width, s->height, s->linesize,
  1842. s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
  1843. block_s, block_s, pix_op,
  1844. s->mv[dir][i][0], s->mv[dir][i][1]);
  1845. mx += s->mv[dir][i][0];
  1846. my += s->mv[dir][i][1];
  1847. }
  1848. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
  1849. chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
  1850. pix_op, mx, my);
  1851. break;
  1852. case MV_TYPE_FIELD:
  1853. if (s->picture_structure == PICT_FRAME) {
  1854. /* top field */
  1855. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1856. 1, 0, s->field_select[dir][0],
  1857. ref_picture, pix_op,
  1858. s->mv[dir][0][0], s->mv[dir][0][1],
  1859. block_s, mb_y);
  1860. /* bottom field */
  1861. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1862. 1, 1, s->field_select[dir][1],
  1863. ref_picture, pix_op,
  1864. s->mv[dir][1][0], s->mv[dir][1][1],
  1865. block_s, mb_y);
  1866. } else {
  1867. if (s->picture_structure != s->field_select[dir][0] + 1 &&
  1868. s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
  1869. ref_picture = s->current_picture_ptr->f.data;
  1870. }
  1871. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1872. 0, 0, s->field_select[dir][0],
  1873. ref_picture, pix_op,
  1874. s->mv[dir][0][0],
  1875. s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
  1876. }
  1877. break;
  1878. case MV_TYPE_16X8:
  1879. for (i = 0; i < 2; i++) {
  1880. uint8_t **ref2picture;
  1881. if (s->picture_structure == s->field_select[dir][i] + 1 ||
  1882. s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
  1883. ref2picture = ref_picture;
  1884. } else {
  1885. ref2picture = s->current_picture_ptr->f.data;
  1886. }
  1887. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1888. 0, 0, s->field_select[dir][i],
  1889. ref2picture, pix_op,
  1890. s->mv[dir][i][0], s->mv[dir][i][1] +
  1891. 2 * block_s * i, block_s, mb_y >> 1);
  1892. dest_y += 2 * block_s * s->linesize;
  1893. dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  1894. dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  1895. }
  1896. break;
  1897. case MV_TYPE_DMV:
  1898. if (s->picture_structure == PICT_FRAME) {
  1899. for (i = 0; i < 2; i++) {
  1900. int j;
  1901. for (j = 0; j < 2; j++) {
  1902. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1903. 1, j, j ^ i,
  1904. ref_picture, pix_op,
  1905. s->mv[dir][2 * i + j][0],
  1906. s->mv[dir][2 * i + j][1],
  1907. block_s, mb_y);
  1908. }
  1909. pix_op = s->dsp.avg_h264_chroma_pixels_tab;
  1910. }
  1911. } else {
  1912. for (i = 0; i < 2; i++) {
  1913. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  1914. 0, 0, s->picture_structure != i + 1,
  1915. ref_picture, pix_op,
  1916. s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
  1917. 2 * block_s, mb_y >> 1);
  1918. // after put we make avg of the same block
  1919. pix_op = s->dsp.avg_h264_chroma_pixels_tab;
  1920. // opposite parity is always in the same
  1921. // frame if this is second field
  1922. if (!s->first_field) {
  1923. ref_picture = s->current_picture_ptr->f.data;
  1924. }
  1925. }
  1926. }
  1927. break;
  1928. default:
  1929. av_assert2(0);
  1930. }
  1931. }
  1932. /**
  1933. * find the lowest MB row referenced in the MVs
  1934. */
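// Used by frame-level multithreading: before motion compensation the decoder
// waits (ff_thread_await_progress) until the reference picture has been
// decoded at least down to the row returned here.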
  1935. int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
  1936. {
  1937. int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
  1938. int my, off, i, mvs;
  1939. if (s->picture_structure != PICT_FRAME) goto unhandled;
  1940. switch (s->mv_type) {
  1941. case MV_TYPE_16X16:
  1942. mvs = 1;
  1943. break;
  1944. case MV_TYPE_16X8:
  1945. mvs = 2;
  1946. break;
  1947. case MV_TYPE_8X8:
  1948. mvs = 4;
  1949. break;
  1950. default:
  1951. goto unhandled;
  1952. }
  1953. for (i = 0; i < mvs; i++) {
  1954. my = s->mv[dir][i][1]<<qpel_shift;
  1955. my_max = FFMAX(my_max, my);
  1956. my_min = FFMIN(my_min, my);
  1957. }
  1958. off = (FFMAX(-my_min, my_max) + 63) >> 6;
  1959. return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
  1960. unhandled:
  1961. return s->mb_height-1;
  1962. }
  1963. /* put block[] to dest[] */
  1964. static inline void put_dct(MpegEncContext *s,
  1965. DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
  1966. {
  1967. s->dct_unquantize_intra(s, block, i, qscale);
  1968. s->dsp.idct_put (dest, line_size, block);
  1969. }
  1970. /* add block[] to dest[] */
  1971. static inline void add_dct(MpegEncContext *s,
  1972. DCTELEM *block, int i, uint8_t *dest, int line_size)
  1973. {
  1974. if (s->block_last_index[i] >= 0) {
  1975. s->dsp.idct_add (dest, line_size, block);
  1976. }
  1977. }
  1978. static inline void add_dequant_dct(MpegEncContext *s,
  1979. DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
  1980. {
  1981. if (s->block_last_index[i] >= 0) {
  1982. s->dct_unquantize_inter(s, block, i, qscale);
  1983. s->dsp.idct_add (dest, line_size, block);
  1984. }
  1985. }
  1986. /**
  1987. * Clean dc, ac, coded_block for the current non-intra MB.
  1988. */
  1989. void ff_clean_intra_table_entries(MpegEncContext *s)
  1990. {
  1991. int wrap = s->b8_stride;
  1992. int xy = s->block_index[0];
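/* reset the luma DC predictors of the four 8x8 blocks to the
   prediction reset value 1024 (128 << 3) */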
  1993. s->dc_val[0][xy ] =
  1994. s->dc_val[0][xy + 1 ] =
  1995. s->dc_val[0][xy + wrap] =
  1996. s->dc_val[0][xy + 1 + wrap] = 1024;
  1997. /* ac pred */
  1998. memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
  1999. memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
  2000. if (s->msmpeg4_version>=3) {
  2001. s->coded_block[xy ] =
  2002. s->coded_block[xy + 1 ] =
  2003. s->coded_block[xy + wrap] =
  2004. s->coded_block[xy + 1 + wrap] = 0;
  2005. }
  2006. /* chroma */
  2007. wrap = s->mb_stride;
  2008. xy = s->mb_x + s->mb_y * wrap;
  2009. s->dc_val[1][xy] =
  2010. s->dc_val[2][xy] = 1024;
  2011. /* ac pred */
  2012. memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
  2013. memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
  2014. s->mbintra_table[xy]= 0;
  2015. }
  2016. /* generic function called after a macroblock has been parsed by the
  2017. decoder or after it has been encoded by the encoder.
  2018. Important variables used:
  2019. s->mb_intra : true if intra macroblock
  2020. s->mv_dir : motion vector direction
  2021. s->mv_type : motion vector type
  2022. s->mv : motion vector
  2023. s->interlaced_dct : true if interlaced dct used (mpeg2)
  2024. */
  2025. static av_always_inline
  2026. void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
  2027. int lowres_flag, int is_mpeg12)
  2028. {
  2029. const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
  2030. if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
  2031. ff_xvmc_decode_mb(s);//xvmc uses pblocks
  2032. return;
  2033. }
  2034. if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  2035. /* save DCT coefficients */
  2036. int i,j;
  2037. DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
  2038. av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
  2039. for(i=0; i<6; i++){
  2040. for(j=0; j<64; j++){
  2041. *dct++ = block[i][s->dsp.idct_permutation[j]];
  2042. av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
  2043. }
  2044. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  2045. }
  2046. }
  2047. s->current_picture.f.qscale_table[mb_xy] = s->qscale;
  2048. /* update DC predictors for P macroblocks */
  2049. if (!s->mb_intra) {
  2050. if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
  2051. if(s->mbintra_table[mb_xy])
  2052. ff_clean_intra_table_entries(s);
  2053. } else {
  2054. s->last_dc[0] =
  2055. s->last_dc[1] =
  2056. s->last_dc[2] = 128 << s->intra_dc_precision;
  2057. }
  2058. }
  2059. else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
  2060. s->mbintra_table[mb_xy]=1;
  2061. if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
  2062. uint8_t *dest_y, *dest_cb, *dest_cr;
  2063. int dct_linesize, dct_offset;
  2064. op_pixels_func (*op_pix)[4];
  2065. qpel_mc_func (*op_qpix)[16];
  2066. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  2067. const int uvlinesize = s->current_picture.f.linesize[1];
  2068. const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
  2069. const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
  2070. /* avoid copy if macroblock skipped in last frame too */
  2071. /* skip only during decoding as we might trash the buffers during encoding a bit */
  2072. if(!s->encoding){
  2073. uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
  2074. if (s->mb_skipped) {
  2075. s->mb_skipped= 0;
  2076. av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
  2077. *mbskip_ptr = 1;
  2078. } else if(!s->current_picture.f.reference) {
  2079. *mbskip_ptr = 1;
  2080. } else{
  2081. *mbskip_ptr = 0; /* not skipped */
  2082. }
  2083. }
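/* With interlaced DCT the luma blocks contain field lines: the effective
   line stride doubles and the lower pair of blocks starts one frame line
   down (the other field) instead of block_size lines down. */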
  2084. dct_linesize = linesize << s->interlaced_dct;
  2085. dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
  2086. if(readable){
  2087. dest_y= s->dest[0];
  2088. dest_cb= s->dest[1];
  2089. dest_cr= s->dest[2];
  2090. }else{
  2091. dest_y = s->b_scratchpad;
  2092. dest_cb= s->b_scratchpad+16*linesize;
  2093. dest_cr= s->b_scratchpad+32*linesize;
  2094. }
  2095. if (!s->mb_intra) {
  2096. /* motion handling */
  2097. /* decoding or more than one mb_type (MC was already done otherwise) */
  2098. if(!s->encoding){
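/* With frame-level threading the reference frames may still be being
   decoded; wait until they have progressed past the lowest row our motion
   vectors can reach. */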
  2099. if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
  2100. if (s->mv_dir & MV_DIR_FORWARD) {
  2101. ff_thread_await_progress(&s->last_picture_ptr->f,
  2102. ff_MPV_lowest_referenced_row(s, 0),
  2103. 0);
  2104. }
  2105. if (s->mv_dir & MV_DIR_BACKWARD) {
  2106. ff_thread_await_progress(&s->next_picture_ptr->f,
  2107. ff_MPV_lowest_referenced_row(s, 1),
  2108. 0);
  2109. }
  2110. }
  2111. if(lowres_flag){
  2112. h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
  2113. if (s->mv_dir & MV_DIR_FORWARD) {
  2114. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
  2115. op_pix = s->dsp.avg_h264_chroma_pixels_tab;
  2116. }
  2117. if (s->mv_dir & MV_DIR_BACKWARD) {
  2118. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
  2119. }
  2120. }else{
  2121. op_qpix= s->me.qpel_put;
  2122. if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
  2123. op_pix = s->dsp.put_pixels_tab;
  2124. }else{
  2125. op_pix = s->dsp.put_no_rnd_pixels_tab;
  2126. }
  2127. if (s->mv_dir & MV_DIR_FORWARD) {
  2128. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
  2129. op_pix = s->dsp.avg_pixels_tab;
  2130. op_qpix= s->me.qpel_avg;
  2131. }
  2132. if (s->mv_dir & MV_DIR_BACKWARD) {
  2133. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
  2134. }
  2135. }
  2136. }
  2137. /* skip dequant / idct if we are really late ;) */
  2138. if(s->avctx->skip_idct){
  2139. if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
  2140. ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
  2141. || s->avctx->skip_idct >= AVDISCARD_ALL)
  2142. goto skip_idct;
  2143. }
  2144. /* add dct residue */
  2145. if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
  2146. || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
  2147. add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2148. add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2149. add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2150. add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2151. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2152. if (s->chroma_y_shift){
  2153. add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2154. add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2155. }else{
  2156. dct_linesize >>= 1;
  2157. dct_offset >>=1;
  2158. add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2159. add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2160. add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2161. add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2162. }
  2163. }
  2164. } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
  2165. add_dct(s, block[0], 0, dest_y , dct_linesize);
  2166. add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
  2167. add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
  2168. add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
  2169. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2170. if(s->chroma_y_shift){//Chroma420
  2171. add_dct(s, block[4], 4, dest_cb, uvlinesize);
  2172. add_dct(s, block[5], 5, dest_cr, uvlinesize);
  2173. }else{
  2174. //chroma422
  2175. dct_linesize = uvlinesize << s->interlaced_dct;
  2176. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
  2177. add_dct(s, block[4], 4, dest_cb, dct_linesize);
  2178. add_dct(s, block[5], 5, dest_cr, dct_linesize);
  2179. add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
  2180. add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
  2181. if(!s->chroma_x_shift){//Chroma444
  2182. add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
  2183. add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
  2184. add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
  2185. add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
  2186. }
  2187. }
  2188. }//fi gray
  2189. }
  2190. else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
  2191. ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
  2192. }
  2193. } else {
  2194. /* dct only in intra block */
  2195. if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
  2196. put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2197. put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2198. put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2199. put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2200. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2201. if(s->chroma_y_shift){
  2202. put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2203. put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2204. }else{
  2205. dct_offset >>=1;
  2206. dct_linesize >>=1;
  2207. put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2208. put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2209. put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2210. put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2211. }
  2212. }
  2213. }else{
  2214. s->dsp.idct_put(dest_y , dct_linesize, block[0]);
  2215. s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
  2216. s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
  2217. s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
  2218. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2219. if(s->chroma_y_shift){
  2220. s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
  2221. s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
  2222. }else{
  2223. dct_linesize = uvlinesize << s->interlaced_dct;
  2224. dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
  2225. s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
  2226. s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
  2227. s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
  2228. s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
  2229. if(!s->chroma_x_shift){//Chroma444
  2230. s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
  2231. s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
  2232. s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
  2233. s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
  2234. }
  2235. }
  2236. }//gray
  2237. }
  2238. }
  2239. skip_idct:
  2240. if(!readable){
  2241. s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
  2242. s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
  2243. s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
  2244. }
  2245. }
  2246. }
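/* MPV_decode_mb_internal() is av_always_inline and is only called with
   compile-time constant lowres_flag / is_mpeg12 arguments, so each branch
   below becomes a specialized version of the decoder; with CONFIG_SMALL the
   MPEG-1/2 specialization is dropped and the generic path is used instead. */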
  2247. void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
  2248. #if !CONFIG_SMALL
  2249. if(s->out_format == FMT_MPEG1) {
  2250. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
  2251. else MPV_decode_mb_internal(s, block, 0, 1);
  2252. } else
  2253. #endif
  2254. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
  2255. else MPV_decode_mb_internal(s, block, 0, 0);
  2256. }
  2257. /**
2258. * @param h the normal height; it is reduced automatically if needed for the last row
  2259. */
  2260. void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
  2261. const int field_pic= s->picture_structure != PICT_FRAME;
  2262. if(field_pic){
  2263. h <<= 1;
  2264. y <<= 1;
  2265. }
  2266. if (!s->avctx->hwaccel
  2267. && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
  2268. && s->unrestricted_mv
  2269. && s->current_picture.f.reference
  2270. && !s->intra_only
  2271. && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
  2272. int sides = 0, edge_h;
  2273. int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
  2274. int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
  2275. if (y==0) sides |= EDGE_TOP;
  2276. if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
  2277. edge_h= FFMIN(h, s->v_edge_pos - y);
  2278. s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
  2279. s->linesize, s->h_edge_pos, edge_h,
  2280. EDGE_WIDTH, EDGE_WIDTH, sides);
  2281. s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
  2282. s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
  2283. EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
  2284. s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
  2285. s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
  2286. EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
  2287. }
  2288. h= FFMIN(h, s->avctx->height - y);
  2289. if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
  2290. if (s->avctx->draw_horiz_band) {
  2291. AVFrame *src;
  2292. int offset[AV_NUM_DATA_POINTERS];
  2293. int i;
  2294. if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
  2295. src = &s->current_picture_ptr->f;
  2296. else if(s->last_picture_ptr)
  2297. src = &s->last_picture_ptr->f;
  2298. else
  2299. return;
  2300. if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
  2301. for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
  2302. offset[i] = 0;
  2303. }else{
  2304. offset[0]= y * s->linesize;
  2305. offset[1]=
  2306. offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
  2307. for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
  2308. offset[i] = 0;
  2309. }
  2310. emms_c();
  2311. s->avctx->draw_horiz_band(s->avctx, src, offset,
  2312. y, s->picture_structure, h);
  2313. }
  2314. }
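/**
 * Set up s->block_index[] (the four luma and two chroma block positions in
 * the b8/mb strides) and the s->dest[] pointers into the current picture for
 * the macroblock position being decoded; with lowres the macroblock size
 * shrinks accordingly (mb_size = 4 - lowres).
 */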
  2315. void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
  2316. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  2317. const int uvlinesize = s->current_picture.f.linesize[1];
  2318. const int mb_size= 4 - s->avctx->lowres;
  2319. s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
  2320. s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
  2321. s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
  2322. s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
  2323. s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2324. s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2325. //block_index is not used by mpeg2, so it is not affected by chroma_format
  2326. s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
  2327. s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2328. s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2329. if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
  2330. {
  2331. if(s->picture_structure==PICT_FRAME){
  2332. s->dest[0] += s->mb_y * linesize << mb_size;
  2333. s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2334. s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2335. }else{
  2336. s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
  2337. s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2338. s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2339. av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
  2340. }
  2341. }
  2342. }
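/**
 * Release all picture buffers that were allocated internally or by the user
 * and reset the parse context and bitstream buffer; this is the codec flush
 * path, typically hit when seeking.
 */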
  2343. void ff_mpeg_flush(AVCodecContext *avctx){
  2344. int i;
  2345. MpegEncContext *s = avctx->priv_data;
  2346. if(s==NULL || s->picture==NULL)
  2347. return;
  2348. for(i=0; i<s->picture_count; i++){
  2349. if (s->picture[i].f.data[0] &&
  2350. (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
  2351. s->picture[i].f.type == FF_BUFFER_TYPE_USER))
  2352. free_frame_buffer(s, &s->picture[i]);
  2353. }
  2354. s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
  2355. s->mb_x= s->mb_y= 0;
  2356. s->closed_gop= 0;
  2357. s->parse_context.state= -1;
  2358. s->parse_context.frame_start_found= 0;
  2359. s->parse_context.overread= 0;
  2360. s->parse_context.overread_index= 0;
  2361. s->parse_context.index= 0;
  2362. s->parse_context.last_index= 0;
  2363. s->bitstream_buffer_size=0;
  2364. s->pp_time=0;
  2365. }
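/* MPEG-1 intra dequantization: the DC coefficient is multiplied by the DC
   scaler, each AC coefficient is reconstructed as (level * qscale * Q[j]) >> 3
   and then "oddified" via (level - 1) | 1, MPEG-1's mismatch control.
   E.g. level = 2, qscale = 8, Q[j] = 16: (2 * 8 * 16) >> 3 = 32 -> 31. */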
  2366. static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
  2367. DCTELEM *block, int n, int qscale)
  2368. {
  2369. int i, level, nCoeffs;
  2370. const uint16_t *quant_matrix;
  2371. nCoeffs= s->block_last_index[n];
  2372. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  2373. /* XXX: only mpeg1 */
  2374. quant_matrix = s->intra_matrix;
  2375. for(i=1;i<=nCoeffs;i++) {
  2376. int j= s->intra_scantable.permutated[i];
  2377. level = block[j];
  2378. if (level) {
  2379. if (level < 0) {
  2380. level = -level;
  2381. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2382. level = (level - 1) | 1;
  2383. level = -level;
  2384. } else {
  2385. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2386. level = (level - 1) | 1;
  2387. }
  2388. block[j] = level;
  2389. }
  2390. }
  2391. }
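/* MPEG-1 inter dequantization: ((2 * level + 1) * qscale * Q[j]) >> 4,
   again oddified; coefficient 0 is treated like any other coefficient since
   non-intra blocks have no separate DC scaler.
   E.g. level = 1, qscale = 2, Q[j] = 16: (3 * 2 * 16) >> 4 = 6 -> 5. */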
  2392. static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
  2393. DCTELEM *block, int n, int qscale)
  2394. {
  2395. int i, level, nCoeffs;
  2396. const uint16_t *quant_matrix;
  2397. nCoeffs= s->block_last_index[n];
  2398. quant_matrix = s->inter_matrix;
  2399. for(i=0; i<=nCoeffs; i++) {
  2400. int j= s->intra_scantable.permutated[i];
  2401. level = block[j];
  2402. if (level) {
  2403. if (level < 0) {
  2404. level = -level;
  2405. level = (((level << 1) + 1) * qscale *
  2406. ((int) (quant_matrix[j]))) >> 4;
  2407. level = (level - 1) | 1;
  2408. level = -level;
  2409. } else {
  2410. level = (((level << 1) + 1) * qscale *
  2411. ((int) (quant_matrix[j]))) >> 4;
  2412. level = (level - 1) | 1;
  2413. }
  2414. block[j] = level;
  2415. }
  2416. }
  2417. }
  2418. static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
  2419. DCTELEM *block, int n, int qscale)
  2420. {
  2421. int i, level, nCoeffs;
  2422. const uint16_t *quant_matrix;
  2423. if(s->alternate_scan) nCoeffs= 63;
  2424. else nCoeffs= s->block_last_index[n];
  2425. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  2426. quant_matrix = s->intra_matrix;
  2427. for(i=1;i<=nCoeffs;i++) {
  2428. int j= s->intra_scantable.permutated[i];
  2429. level = block[j];
  2430. if (level) {
  2431. if (level < 0) {
  2432. level = -level;
  2433. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2434. level = -level;
  2435. } else {
  2436. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2437. }
  2438. block[j] = level;
  2439. }
  2440. }
  2441. }
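/* MPEG-2 drops the per-coefficient oddification and instead uses a single
   parity (mismatch) correction: the bitexact variant below sums all output
   coefficients and flips the LSB of block[63] when that sum is even, exactly
   as dct_unquantize_mpeg2_inter_c() does; the plain intra variant above
   skips the correction for speed. */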
  2442. static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
  2443. DCTELEM *block, int n, int qscale)
  2444. {
  2445. int i, level, nCoeffs;
  2446. const uint16_t *quant_matrix;
  2447. int sum=-1;
  2448. if(s->alternate_scan) nCoeffs= 63;
  2449. else nCoeffs= s->block_last_index[n];
  2450. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  2451. sum += block[0];
  2452. quant_matrix = s->intra_matrix;
  2453. for(i=1;i<=nCoeffs;i++) {
  2454. int j= s->intra_scantable.permutated[i];
  2455. level = block[j];
  2456. if (level) {
  2457. if (level < 0) {
  2458. level = -level;
  2459. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2460. level = -level;
  2461. } else {
  2462. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2463. }
  2464. block[j] = level;
  2465. sum+=level;
  2466. }
  2467. }
  2468. block[63]^=sum&1;
  2469. }
  2470. static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
  2471. DCTELEM *block, int n, int qscale)
  2472. {
  2473. int i, level, nCoeffs;
  2474. const uint16_t *quant_matrix;
  2475. int sum=-1;
  2476. if(s->alternate_scan) nCoeffs= 63;
  2477. else nCoeffs= s->block_last_index[n];
  2478. quant_matrix = s->inter_matrix;
  2479. for(i=0; i<=nCoeffs; i++) {
  2480. int j= s->intra_scantable.permutated[i];
  2481. level = block[j];
  2482. if (level) {
  2483. if (level < 0) {
  2484. level = -level;
  2485. level = (((level << 1) + 1) * qscale *
  2486. ((int) (quant_matrix[j]))) >> 4;
  2487. level = -level;
  2488. } else {
  2489. level = (((level << 1) + 1) * qscale *
  2490. ((int) (quant_matrix[j]))) >> 4;
  2491. }
  2492. block[j] = level;
  2493. sum+=level;
  2494. }
  2495. }
  2496. block[63]^=sum&1;
  2497. }
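/* H.263 / MPEG-4 style dequantization: level' = level * 2 * qscale + qadd
   (or - qadd for negative levels), with qadd = (qscale - 1) | 1; when
   advanced intra coding (h263_aic) is in use, intra blocks are dequantized
   with qadd = 0 and without the DC scaler. */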
  2498. static void dct_unquantize_h263_intra_c(MpegEncContext *s,
  2499. DCTELEM *block, int n, int qscale)
  2500. {
  2501. int i, level, qmul, qadd;
  2502. int nCoeffs;
  2503. assert(s->block_last_index[n]>=0);
  2504. qmul = qscale << 1;
  2505. if (!s->h263_aic) {
  2506. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  2507. qadd = (qscale - 1) | 1;
  2508. }else{
  2509. qadd = 0;
  2510. }
  2511. if(s->ac_pred)
  2512. nCoeffs=63;
  2513. else
  2514. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  2515. for(i=1; i<=nCoeffs; i++) {
  2516. level = block[i];
  2517. if (level) {
  2518. if (level < 0) {
  2519. level = level * qmul - qadd;
  2520. } else {
  2521. level = level * qmul + qadd;
  2522. }
  2523. block[i] = level;
  2524. }
  2525. }
  2526. }
  2527. static void dct_unquantize_h263_inter_c(MpegEncContext *s,
  2528. DCTELEM *block, int n, int qscale)
  2529. {
  2530. int i, level, qmul, qadd;
  2531. int nCoeffs;
  2532. assert(s->block_last_index[n]>=0);
  2533. qadd = (qscale - 1) | 1;
  2534. qmul = qscale << 1;
  2535. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  2536. for(i=0; i<=nCoeffs; i++) {
  2537. level = block[i];
  2538. if (level) {
  2539. if (level < 0) {
  2540. level = level * qmul - qadd;
  2541. } else {
  2542. level = level * qmul + qadd;
  2543. }
  2544. block[i] = level;
  2545. }
  2546. }
  2547. }
  2548. /**
2549. * Set qscale and update the qscale-dependent variables.
  2550. */
  2551. void ff_set_qscale(MpegEncContext * s, int qscale)
  2552. {
  2553. if (qscale < 1)
  2554. qscale = 1;
  2555. else if (qscale > 31)
  2556. qscale = 31;
  2557. s->qscale = qscale;
  2558. s->chroma_qscale= s->chroma_qscale_table[qscale];
  2559. s->y_dc_scale= s->y_dc_scale_table[ qscale ];
  2560. s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
  2561. }
  2562. void ff_MPV_report_decode_progress(MpegEncContext *s)
  2563. {
  2564. if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
  2565. ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
  2566. }