/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h" // needed for av_pix_fmt_desc_get() / av_pix_fmt_get_chroma_sub_sample() used below
#include "avcodec.h"
#include "dsputil.h"
#include "internal.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "xvmc_internal.h"
#include "thread.h"
#include <limits.h>

//#undef NDEBUG
//#include <assert.h>
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
/* enable all paranoid tests for rounding, overflows, etc... */
//#define PARANOID

//#define DEBUG
static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};

const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};

static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
const enum AVPixelFormat ff_pixfmt_list_420[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};

const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
    AV_PIX_FMT_DXVA2_VLD,
    AV_PIX_FMT_VAAPI_VLD,
    AV_PIX_FMT_VDA_VLD,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t *restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    while (p < end) {
        if      (p[-1] > 1      ) p += 3;
        else if (p[-2]          ) p += 2;
        else if (p[-3] | (p[-1] - 1)) p++;
        else {
            p++;
            break;
        }
    }

    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
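
/* Illustrative usage sketch (not part of the original file): scanning a
 * buffer for MPEG start codes could look roughly like this, where
 * handle_start_code() is a hypothetical caller-supplied function:
 *
 *     uint32_t state = -1;
 *     const uint8_t *ptr = buf, *buf_end = buf + buf_size;
 *     while (ptr < buf_end) {
 *         ptr = avpriv_mpv_find_start_code(ptr, buf_end, &state);
 *         if ((state & 0xFFFFFF00) == 0x100)
 *             handle_start_code(state & 0xFF);
 *     }
 *
 * After a hit, the low byte of *state holds the start code value and the
 * returned pointer sits just past it, so the scan can resume in place.
 */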
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if ARCH_X86
    ff_MPV_common_init_x86(s);
#elif ARCH_ALPHA
    ff_MPV_common_init_axp(s);
#elif ARCH_ARM
    ff_MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    ff_MPV_common_init_altivec(s);
#elif ARCH_BFIN
    ff_MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
void ff_copy_picture(Picture *dst, Picture *src)
{
    *dst = *src;
    dst->f.type = FF_BUFFER_TYPE_COPY;
}

/**
 * Release a frame buffer
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->f);
    else
        avcodec_default_release_buffer(s->avctx, &pic->f);
    av_freep(&pic->f.hwaccel_picture_private);
}
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    // VC1 computes luma and chroma simultaneously and needs 19x19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
                      fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
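
/* Worked example of the sizing above (illustrative numbers, not from the
 * original file): for a 1080p luma plane with linesize 1920,
 * alloc_size = FFALIGN(1920 + 32, 32) = 1952, so edge_emu_buffer gets
 * 1952 * 2 * 24 bytes (the 24-line emulated-edge area, doubled for
 * interlacing) and me.scratchpad gets 1952 * 2 * 16 * 2 bytes (a 16-line
 * macroblock row, again doubled for interlacing, times two planes of
 * scratch space). */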
/**
 * Allocate a frame buffer
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r, ret;

    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->f);
    else
        r = avcodec_default_get_buffer(s->avctx, &pic->f);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    if (!s->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        free_frame_buffer(s, pic);
        return ret;
    }

    return 0;
}
/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0.
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    // the + 1 is needed so memset(,,stride*height) does not sig11
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    const int b4_array_size = s->b4_stride * s->mb_height * 4;
    int i;
    int r = -1;

    if (shared) {
        assert(pic->f.data[0]);
        assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
        pic->f.type = FF_BUFFER_TYPE_SHARED;
    } else {
        assert(!pic->f.data[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    if (pic->f.qscale_table == NULL) {
        if (s->encoding) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
                              mb_array_size * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
                              mb_array_size * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
                              mb_array_size * sizeof(int8_t), fail)
        }

        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
                          mb_array_size * sizeof(uint8_t) + 2, fail)
        // the + 2 is for the slice end check
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
                          (big_mb_num + s->mb_stride) * sizeof(uint8_t),
                          fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
                          (big_mb_num + s->mb_stride) * sizeof(uint32_t),
                          fail)
        pic->f.mb_type      = pic->mb_type_base + 2 * s->mb_stride + 1;
        pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;

        if (s->out_format == FMT_H264) {
            for (i = 0; i < 2; i++) {
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
                                  2 * (b4_array_size + 4) * sizeof(int16_t),
                                  fail)
                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
                                  4 * mb_array_size * sizeof(uint8_t), fail)
            }
            pic->f.motion_subsample_log2 = 2;
        } else if (s->out_format == FMT_H263 || s->encoding ||
                   (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
            for (i = 0; i < 2; i++) {
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
                                  2 * (b8_array_size + 4) * sizeof(int16_t),
                                  fail)
                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
                                  4 * mb_array_size * sizeof(uint8_t), fail)
            }
            pic->f.motion_subsample_log2 = 3;
        }
        if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
                              64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
        }
        pic->f.qstride = s->mb_stride;
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
                          1 * sizeof(AVPanScan), fail)
    }

    pic->owner2 = s;

    return 0;
fail: // for the FF_ALLOCZ_OR_GOTO macro
    if (r >= 0)
        free_frame_buffer(s, pic);
    return -1;
}
/**
 * Deallocate a picture.
 */
static void free_picture(MpegEncContext *s, Picture *pic)
{
    int i;

    if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
        free_frame_buffer(s, pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->f.mbskip_table);
    av_freep(&pic->qscale_table_base);
    pic->f.qscale_table = NULL;
    av_freep(&pic->mb_type_base);
    pic->f.mb_type = NULL;
    av_freep(&pic->f.dct_coeff);
    av_freep(&pic->f.pan_scan);
    pic->f.mb_type = NULL;
    for (i = 0; i < 2; i++) {
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->f.ref_index[i]);
        pic->f.motion_val[i] = NULL;
    }

    if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
        for (i = 0; i < 4; i++) {
            pic->f.base[i] =
            pic->f.data[i] = NULL;
        }
        pic->f.type = 0;
    }
}
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
static void free_duplicate_context(MpegEncContext *s)
{
    if (s == NULL)
        return;

    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (!dst->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for 1000 frames on 1GHz with 2 threads
    return 0;
}
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src || !s1->context_initialized)
        return 0;

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->picture_range_start  += MAX_PICTURE_COUNT;
        s->picture_range_end    += MAX_PICTURE_COUNT;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        ff_MPV_common_init(s);
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        int err;
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((err = ff_MPV_common_frame_size_change(s)) < 0)
            return err;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture,
           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

    // reset s->picture[].f.extended_data to s->picture[].f.data
    for (i = 0; i < s->picture_count; i++)
        s->picture[i].f.extended_data = s->picture[i].f.data;

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;

    // MPEG4 timing info
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize-dependent scratch buffer allocation
    if (!s->edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
            return AVERROR_BUG;
        }

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}
/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_MPV_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table      =
    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
    s->progressive_frame     = 1;
    s->progressive_sequence  = 1;
    s->picture_structure     = PICT_FRAME;

    s->coded_picture_number  = 0;
    s->picture_number        = 0;
    s->input_picture_number  = 0;

    s->picture_in_gop_number = 0;

    s->f_code                = 1;
    s->b_code                = 1;

    s->picture_range_start   = 0;
    s->picture_range_end     = MAX_PICTURE_COUNT;

    s->slice_context_count   = 1;
}

/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    ff_MPV_common_defaults(s);
}
/**
 * Initialize and allocate MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
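
    /* Worked example of the geometry above (illustrative, not from the
     * original file): for a 1920x1080 stream, mb_width = 120 and, with
     * progressive coding, mb_height = 68, so mb_stride = 121 (one spare
     * column), b8_stride = 241 and b4_stride = 481. mb_index2xy then maps
     * a raster macroblock index 0..8159 onto this padded stride layout. */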
    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                   s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                   s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                          sizeof(uint16_t), fail); // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                          sizeof(int), fail);

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);
    }

    FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
                     mb_array_size * sizeof(uint8_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
                      mb_array_size * sizeof(uint8_t), fail);

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table[i][j],
                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                  mv_table_size * 2 * sizeof(int16_t), fail);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                            + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                              mb_array_size * 2 * sizeof(uint8_t), fail);
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                          mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                          mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker mpeg4 slice_end detection

    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        s->avctx->debug_mv) {
        s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
    }

    return 0;
fail:
    return AVERROR(ENOMEM);
}
/**
 * Init common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 */
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
    int i;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != AV_CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    if (s->width && s->height) {
        /* set chroma shifts */
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &s->chroma_x_shift,
                                         &s->chroma_y_shift);

        /* convert fourcc to upper case */
        s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
        s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);

        s->avctx->coded_frame = &s->current_picture.f;

        if (s->encoding) {
            if (s->msmpeg4_version) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                                  2 * 2 * (MAX_LEVEL + 1) *
                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
                              64 * 32 * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
                              64 * 32 * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);

            if (s->avctx->noise_reduction) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                                  2 * 64 * sizeof(uint16_t), fail);
            }
        }
    }

    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults(&s->picture[i].f);
    }

    if (s->width && s->height) {
        if (init_context_frame(s))
            goto fail;

        s->parse_context.state = -1;
    }

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    if (s->width && s->height) {
        if (nb_slices > 1) {
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i], s) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            if (init_duplicate_context(s, s) < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
fail:
    ff_MPV_common_end(s);
    return -1;
}
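
/* Illustrative lifecycle sketch (not part of the original file): a decoder
 * built on this module typically drives the context roughly like
 *
 *     MpegEncContext *s = avctx->priv_data;
 *     ff_MPV_decode_defaults(s);
 *     s->avctx  = avctx;
 *     s->width  = avctx->coded_width;
 *     s->height = avctx->coded_height;
 *     if (ff_MPV_common_init(s) < 0)
 *         return -1;
 *     ... per frame: ff_MPV_frame_start(), decode, ff_MPV_frame_end() ...
 *     ff_MPV_common_end(s);
 *
 * with the per-frame calls shown later in this file.
 */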
/**
 * Free and reset MpegEncContext fields depending on the resolution.
 * Used during resolution changes to avoid a full reinitialization of the
 * codec.
 */
static int free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->error_status_table);
    av_freep(&s->er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    return 0;
}
int ff_MPV_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else
        free_duplicate_context(s);

    free_context_frame(s);

    if (s->picture)
        for (i = 0; i < s->picture_count; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != AV_CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR_INVALIDDATA;

    if ((err = init_context_frame(s)))
        goto fail;

    s->thread_context[0] = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i], s) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            if (init_duplicate_context(s, s) < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
fail:
    ff_MPV_common_end(s);
    return err;
}
/* free common structure for both encoder and decoder */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else
        free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);

    free_context_frame(s);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);

    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;
}
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end   = rl->last;
        } else {
            start = rl->last;
            end   = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run,   0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
void ff_init_vlc_rl(RLTable *rl)
{
    int i, q;

    for (q = 0; q < 32; q++) {
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        if (q == 0) {
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len  = rl->vlc.table[i][1];
            int level, run;

            if (len == 0) { // illegal code
                run   = 66;
                level = MAX_LEVEL;
            } else if (len < 0) { // more bits needed
                run   = 0;
                level = code;
            } else {
                if (code == rl->n) { // esc
                    run   = 66;
                    level =  0;
                } else {
                    run   = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    if (code >= rl->last) run += 192;
                }
            }
            rl->rl_vlc[q][i].len   = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run   = run;
        }
    }
}
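
/* Worked example of the qmul/qadd mapping above (illustrative, not from the
 * original file): H.263-style dequantization reconstructs a coefficient as
 * |level| * (2 * qscale) + ((qscale - 1) | 1), so for qscale = 5 a stored
 * level of 3 becomes 3 * 10 + 5 = 35. Baking this into rl_vlc[q] lets the
 * decoder read run, level and length in one table hit per VLC symbol
 * instead of dequantizing in a separate pass. */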
void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
{
    int i;

    /* release non-reference frames */
    for (i = 0; i < s->picture_count; i++) {
        if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
            (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
            (remove_current || &s->picture[i] != s->current_picture_ptr)
            /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
            free_frame_buffer(s, &s->picture[i]);
        }
    }
}

static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
{
    if (pic->f.data[0] == NULL)
        return 1;
    if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
        if (!pic->owner2 || pic->owner2 == s)
            return 1;
    return 0;
}
static int find_unused_picture(MpegEncContext *s, int shared)
{
    int i;

    if (shared) {
        for (i = s->picture_range_start; i < s->picture_range_end; i++) {
            if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
                return i;
        }
    } else {
        for (i = s->picture_range_start; i < s->picture_range_end; i++) {
            if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
                return i; // FIXME
        }
        for (i = s->picture_range_start; i < s->picture_range_end; i++) {
            if (pic_is_unused(s, &s->picture[i]))
                return i;
        }
    }

    return AVERROR_INVALIDDATA;
}

int ff_find_unused_picture(MpegEncContext *s, int shared)
{
    int ret = find_unused_picture(s, shared);

    if (ret >= 0 && ret < s->picture_range_end) {
        if (s->picture[ret].needs_realloc) {
            s->picture[ret].needs_realloc = 0;
            free_picture(s, &s->picture[ret]);
            avcodec_get_frame_defaults(&s->picture[ret].f);
        }
    }
    return ret;
}
static void update_noise_reduction(MpegEncContext *s)
{
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        for (i = 0; i < 64; i++) {
            s->dct_offset[intra][i] = (s->avctx->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
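
/* Worked example of the offset formula above (illustrative numbers, not
 * from the original file): with avctx->noise_reduction = 256,
 * dct_count[intra] = 1000 and dct_error_sum[intra][i] = 64000 for some
 * coefficient i, the bias becomes
 * (256 * 1000 + 32000) / 64001 = 4. The larger the accumulated error for
 * a coefficient, the smaller the dead-zone offset applied to it, so
 * rarely-wrong coefficients keep a stronger bias. */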
/**
 * Generic function for encode/decode called after coding/decoding
 * the header and before a frame is coded/decoded.
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    /* mark & release old frames */
    if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);
        }

        /* release forgotten pictures */
        /* if (mpeg124/h263) */
        if (!s->encoding) {
            for (i = 0; i < s->picture_count; i++) {
                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                    &s->picture[i] != s->last_picture_ptr &&
                    &s->picture[i] != s->next_picture_ptr &&
                    s->picture[i].f.reference && !s->picture[i].needs_realloc) {
                    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                        av_log(avctx, AV_LOG_ERROR,
                               "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
    }

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.data[0] == NULL) {
            // we already have an unused image
            // (maybe it was set before reading the header)
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            pic = &s->picture[i];
        }

        pic->f.reference = 0;
        if (!s->droppable) {
            if (s->codec_id == AV_CODEC_ID_H264)
                pic->f.reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->f.reference = 3;
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        // FIXME use only the vars from current_pic
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture      = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr, s->current_picture_ptr,
            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    if (s->codec_id != AV_CODEC_ID_H264) {
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->last_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                s->last_picture_ptr = NULL;
                return -1;
            }
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
            s->last_picture_ptr->f.reference = 3;
        }
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {
            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->next_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                s->next_picture_ptr = NULL;
                return -1;
            }
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
            s->next_picture_ptr->f.reference = 3;
        }
    }

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;
    }

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    if (s->picture_structure != PICT_FRAME && s->out_format != FMT_H264) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
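
/* Illustrative per-frame call sequence (a sketch, not from the original
 * file): after header parsing has set pict_type and the picture structure,
 * a decoder built on this module does roughly
 *
 *     if (ff_MPV_frame_start(s, avctx) < 0)
 *         return -1;
 *     ... decode all slices/macroblocks of the frame ...
 *     ff_MPV_frame_end(s);
 *
 * so frame_start owns reference rotation and buffer allocation, while
 * frame_end (below) redraws edges and reports decoding progress.
 */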
/* generic function for encode/decode called after a
 * frame has been coded/decoded. */
void ff_MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if ((s->error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.f.reference &&
               !s->intra_only &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    s->last_pict_type                = s->pict_type;
    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
                /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture,    0, sizeof(Picture));
    memset(&s->next_picture,    0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
    }
}
/**
 * Draw a line from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the line
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
                      int w, int h, int stride, int color)
{
    int x, y, fr, f;

    sx = av_clip(sx, 0, w - 1);
    sy = av_clip(sy, 0, h - 1);
    ex = av_clip(ex, 0, w - 1);
    ey = av_clip(ey, 0, h - 1);

    buf[sy * stride + sx] += color;

    if (FFABS(ex - sx) > FFABS(ey - sy)) {
        if (sx > ex) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ex  -= sx;
        f    = ((ey - sy) << 16) / ex;
        for (x = 0; x <= ex; x++) {
            y  = (x * f) >> 16;
            fr = (x * f) & 0xFFFF;
            buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
            buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
        }
    } else {
        if (sy > ey) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ey  -= sy;
        if (ey)
            f = ((ex - sx) << 16) / ey;
        else
            f = 0;
        for (y = 0; y <= ey; y++) {
            x  = (y * f) >> 16;
            fr = (y * f) & 0xFFFF;
            buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
            buf[y * stride + x + 1] += (color *            fr ) >> 16;
        }
    }
}
  1467. /**
  1468. * Draw an arrow from (ex, ey) -> (sx, sy).
  1469. * @param w width of the image
  1470. * @param h height of the image
  1471. * @param stride stride/linesize of the image
  1472. * @param color color of the arrow
  1473. */
  1474. static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
  1475. int ey, int w, int h, int stride, int color)
  1476. {
  1477. int dx,dy;
  1478. sx = av_clip(sx, -100, w + 100);
  1479. sy = av_clip(sy, -100, h + 100);
  1480. ex = av_clip(ex, -100, w + 100);
  1481. ey = av_clip(ey, -100, h + 100);
  1482. dx = ex - sx;
  1483. dy = ey - sy;
  1484. if (dx * dx + dy * dy > 3 * 3) {
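/* Draw a head only if the vector is longer than 3 pixels; (rx, ry) is
 * the direction rotated by 45 degrees and scaled to length 3, and the
 * two head strokes are this vector and its 90-degree rotation. */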
  1485. int rx = dx + dy;
  1486. int ry = -dx + dy;
  1487. int length = ff_sqrt((rx * rx + ry * ry) << 8);
  1488. // FIXME subpixel accuracy
  1489. rx = ROUNDED_DIV(rx * 3 << 4, length);
  1490. ry = ROUNDED_DIV(ry * 3 << 4, length);
  1491. draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
  1492. draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
  1493. }
  1494. draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
  1495. }
  1496. /**
  1497. * Print debugging info for the given picture.
  1498. */
  1499. void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
  1500. {
  1501. if (s->avctx->hwaccel || !pict || !pict->mb_type)
  1502. return;
  1503. if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
  1504. int x,y;
  1505. av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
  1506. switch (pict->pict_type) {
  1507. case AV_PICTURE_TYPE_I:
  1508. av_log(s->avctx,AV_LOG_DEBUG,"I\n");
  1509. break;
  1510. case AV_PICTURE_TYPE_P:
  1511. av_log(s->avctx,AV_LOG_DEBUG,"P\n");
  1512. break;
  1513. case AV_PICTURE_TYPE_B:
  1514. av_log(s->avctx,AV_LOG_DEBUG,"B\n");
  1515. break;
  1516. case AV_PICTURE_TYPE_S:
  1517. av_log(s->avctx,AV_LOG_DEBUG,"S\n");
  1518. break;
  1519. case AV_PICTURE_TYPE_SI:
  1520. av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
  1521. break;
  1522. case AV_PICTURE_TYPE_SP:
  1523. av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
  1524. break;
  1525. }
  1526. for (y = 0; y < s->mb_height; y++) {
  1527. for (x = 0; x < s->mb_width; x++) {
  1528. if (s->avctx->debug & FF_DEBUG_SKIP) {
  1529. int count = s->mbskip_table[x + y * s->mb_stride];
  1530. if (count > 9)
  1531. count = 9;
  1532. av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
  1533. }
  1534. if (s->avctx->debug & FF_DEBUG_QP) {
  1535. av_log(s->avctx, AV_LOG_DEBUG, "%2d",
  1536. pict->qscale_table[x + y * s->mb_stride]);
  1537. }
  1538. if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
  1539. int mb_type = pict->mb_type[x + y * s->mb_stride];
  1540. // Type & MV direction
  1541. if (IS_PCM(mb_type))
  1542. av_log(s->avctx, AV_LOG_DEBUG, "P");
  1543. else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
  1544. av_log(s->avctx, AV_LOG_DEBUG, "A");
  1545. else if (IS_INTRA4x4(mb_type))
  1546. av_log(s->avctx, AV_LOG_DEBUG, "i");
  1547. else if (IS_INTRA16x16(mb_type))
  1548. av_log(s->avctx, AV_LOG_DEBUG, "I");
  1549. else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
  1550. av_log(s->avctx, AV_LOG_DEBUG, "d");
  1551. else if (IS_DIRECT(mb_type))
  1552. av_log(s->avctx, AV_LOG_DEBUG, "D");
  1553. else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
  1554. av_log(s->avctx, AV_LOG_DEBUG, "g");
  1555. else if (IS_GMC(mb_type))
  1556. av_log(s->avctx, AV_LOG_DEBUG, "G");
  1557. else if (IS_SKIP(mb_type))
  1558. av_log(s->avctx, AV_LOG_DEBUG, "S");
  1559. else if (!USES_LIST(mb_type, 1))
  1560. av_log(s->avctx, AV_LOG_DEBUG, ">");
  1561. else if (!USES_LIST(mb_type, 0))
  1562. av_log(s->avctx, AV_LOG_DEBUG, "<");
  1563. else {
  1564. assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1565. av_log(s->avctx, AV_LOG_DEBUG, "X");
  1566. }
  1567. // segmentation
  1568. if (IS_8X8(mb_type))
  1569. av_log(s->avctx, AV_LOG_DEBUG, "+");
  1570. else if (IS_16X8(mb_type))
  1571. av_log(s->avctx, AV_LOG_DEBUG, "-");
  1572. else if (IS_8X16(mb_type))
  1573. av_log(s->avctx, AV_LOG_DEBUG, "|");
  1574. else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
  1575. av_log(s->avctx, AV_LOG_DEBUG, " ");
  1576. else
  1577. av_log(s->avctx, AV_LOG_DEBUG, "?");
  1578. if (IS_INTERLACED(mb_type))
  1579. av_log(s->avctx, AV_LOG_DEBUG, "=");
  1580. else
  1581. av_log(s->avctx, AV_LOG_DEBUG, " ");
  1582. }
  1583. }
  1584. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  1585. }
  1586. }
  1587. if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
  1588. (s->avctx->debug_mv)) {
  1589. const int shift = 1 + s->quarter_sample;
  1590. int mb_y;
  1591. uint8_t *ptr;
  1592. int i;
  1593. int h_chroma_shift, v_chroma_shift, block_height;
  1594. const int width = s->avctx->width;
  1595. const int height = s->avctx->height;
  1596. const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
  1597. const int mv_stride = (s->mb_width << mv_sample_log2) +
  1598. (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
  1599. s->low_delay = 0; // needed to see the vectors without trashing the buffers
  1600. av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
  1601. &h_chroma_shift, &v_chroma_shift);
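/* Draw into a private copy of the frame so the reference planes that
 * later frames predict from stay untouched. */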
  1602. for (i = 0; i < 3; i++) {
  1603. memcpy(s->visualization_buffer[i], pict->data[i],
  1604. (i == 0) ? pict->linesize[i] * height:
  1605. pict->linesize[i] * height >> v_chroma_shift);
  1606. pict->data[i] = s->visualization_buffer[i];
  1607. }
  1608. pict->type = FF_BUFFER_TYPE_COPY;
  1609. ptr = pict->data[0];
  1610. block_height = 16 >> v_chroma_shift;
  1611. for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
  1612. int mb_x;
  1613. for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
  1614. const int mb_index = mb_x + mb_y * s->mb_stride;
  1615. if ((s->avctx->debug_mv) && pict->motion_val) {
  1616. int type;
  1617. for (type = 0; type < 3; type++) {
  1618. int direction = 0;
  1619. switch (type) {
  1620. case 0:
  1621. if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1622. (pict->pict_type != AV_PICTURE_TYPE_P))
  1623. continue;
  1624. direction = 0;
  1625. break;
  1626. case 1:
  1627. if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1628. (pict->pict_type != AV_PICTURE_TYPE_B))
  1629. continue;
  1630. direction = 0;
  1631. break;
  1632. case 2:
  1633. if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1634. (pict->pict_type != AV_PICTURE_TYPE_B))
  1635. continue;
  1636. direction = 1;
  1637. break;
  1638. }
  1639. if (!USES_LIST(pict->mb_type[mb_index], direction))
  1640. continue;
  1641. if (IS_8X8(pict->mb_type[mb_index])) {
  1642. int i;
  1643. for (i = 0; i < 4; i++) {
  1644. int sx = mb_x * 16 + 4 + 8 * (i & 1);
  1645. int sy = mb_y * 16 + 4 + 8 * (i >> 1);
  1646. int xy = (mb_x * 2 + (i & 1) +
  1647. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1648. int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
  1649. int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
  1650. draw_arrow(ptr, sx, sy, mx, my, width,
  1651. height, s->linesize, 100);
  1652. }
  1653. } else if (IS_16X8(pict->mb_type[mb_index])) {
  1654. int i;
  1655. for (i = 0; i < 2; i++) {
  1656. int sx = mb_x * 16 + 8;
  1657. int sy = mb_y * 16 + 4 + 8 * i;
  1658. int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
  1659. int mx = (pict->motion_val[direction][xy][0] >> shift);
  1660. int my = (pict->motion_val[direction][xy][1] >> shift);
  1661. if (IS_INTERLACED(pict->mb_type[mb_index]))
  1662. my *= 2;
  1663. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1664. height, s->linesize, 100);
  1665. }
  1666. } else if (IS_8X16(pict->mb_type[mb_index])) {
  1667. int i;
  1668. for (i = 0; i < 2; i++) {
  1669. int sx = mb_x * 16 + 4 + 8 * i;
  1670. int sy = mb_y * 16 + 8;
  1671. int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
  1672. int mx = pict->motion_val[direction][xy][0] >> shift;
  1673. int my = pict->motion_val[direction][xy][1] >> shift;
  1674. if (IS_INTERLACED(pict->mb_type[mb_index]))
  1675. my *= 2;
  1676. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1677. height, s->linesize, 100);
  1678. }
  1679. } else {
  1680. int sx = mb_x * 16 + 8;
  1681. int sy = mb_y * 16 + 8;
  1682. int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1683. int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1684. int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
  1685. draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
  1686. }
  1687. }
  1688. }
  1689. if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
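/* Scale qscale (1..31) to roughly 0..128 and replicate the byte across
 * a 64-bit word so each store paints 8 chroma pixels at once. */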
  1690. uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
  1691. 0x0101010101010101ULL;
  1692. int y;
  1693. for (y = 0; y < block_height; y++) {
  1694. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1695. (block_height * mb_y + y) *
  1696. pict->linesize[1]) = c;
  1697. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1698. (block_height * mb_y + y) *
  1699. pict->linesize[2]) = c;
  1700. }
  1701. }
  1702. if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
  1703. pict->motion_val) {
  1704. int mb_type = pict->mb_type[mb_index];
  1705. uint64_t u,v;
  1706. int y;
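/* Pick a point on the U/V color plane: theta selects the hue in
 * degrees, r the distance from the neutral gray value 128. */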
  1707. #define COLOR(theta, r) \
  1708. u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
  1709. v = (int)(128 + r * sin(theta * 3.141592 / 180));
  1710. u = v = 128;
  1711. if (IS_PCM(mb_type)) {
  1712. COLOR(120, 48)
  1713. } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
  1714. IS_INTRA16x16(mb_type)) {
  1715. COLOR(30, 48)
  1716. } else if (IS_INTRA4x4(mb_type)) {
  1717. COLOR(90, 48)
  1718. } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
  1719. // COLOR(120, 48)
  1720. } else if (IS_DIRECT(mb_type)) {
  1721. COLOR(150, 48)
  1722. } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
  1723. COLOR(170, 48)
  1724. } else if (IS_GMC(mb_type)) {
  1725. COLOR(190, 48)
  1726. } else if (IS_SKIP(mb_type)) {
  1727. // COLOR(180, 48)
  1728. } else if (!USES_LIST(mb_type, 1)) {
  1729. COLOR(240, 48)
  1730. } else if (!USES_LIST(mb_type, 0)) {
  1731. COLOR(0, 48)
  1732. } else {
  1733. assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1734. COLOR(300,48)
  1735. }
  1736. u *= 0x0101010101010101ULL;
  1737. v *= 0x0101010101010101ULL;
  1738. for (y = 0; y < block_height; y++) {
  1739. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1740. (block_height * mb_y + y) * pict->linesize[1]) = u;
  1741. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1742. (block_height * mb_y + y) * pict->linesize[2]) = v;
  1743. }
  1744. // segmentation
  1745. if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
  1746. *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
  1747. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1748. *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
  1749. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1750. }
  1751. if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
  1752. for (y = 0; y < 16; y++)
  1753. pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
  1754. pict->linesize[0]] ^= 0x80;
  1755. }
  1756. if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
  1757. int dm = 1 << (mv_sample_log2 - 2);
  1758. for (i = 0; i < 4; i++) {
  1759. int sx = mb_x * 16 + 8 * (i & 1);
  1760. int sy = mb_y * 16 + 8 * (i >> 1);
  1761. int xy = (mb_x * 2 + (i & 1) +
  1762. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1763. // FIXME bidir
  1764. int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
  1765. if (mv[0] != mv[dm] ||
  1766. mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
  1767. for (y = 0; y < 8; y++)
  1768. pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
  1769. if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
  1770. *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
  1771. pict->linesize[0]) ^= 0x8080808080808080ULL;
  1772. }
  1773. }
  1774. if (IS_INTERLACED(mb_type) &&
  1775. s->codec_id == AV_CODEC_ID_H264) {
  1776. // hmm
  1777. }
  1778. }
  1779. s->mbskip_table[mb_index] = 0;
  1780. }
  1781. }
  1782. }
  1783. }
  1784. /**
  1785. * find the lowest MB row referenced in the MVs
  1786. */
  1787. int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
  1788. {
  1789. int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
  1790. int my, off, i, mvs;
  1791. if (s->picture_structure != PICT_FRAME || s->mcsel)
  1792. goto unhandled;
  1793. switch (s->mv_type) {
  1794. case MV_TYPE_16X16:
  1795. mvs = 1;
  1796. break;
  1797. case MV_TYPE_16X8:
  1798. mvs = 2;
  1799. break;
  1800. case MV_TYPE_8X8:
  1801. mvs = 4;
  1802. break;
  1803. default:
  1804. goto unhandled;
  1805. }
  1806. for (i = 0; i < mvs; i++) {
  1807. my = s->mv[dir][i][1]<<qpel_shift;
  1808. my_max = FFMAX(my_max, my);
  1809. my_min = FFMIN(my_min, my);
  1810. }
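/* my is in quarter-pel units here; 64 quarter-pel = 16 luma lines = one
 * MB row, so (x + 63) >> 6 rounds the largest displacement up to rows. */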
  1811. off = (FFMAX(-my_min, my_max) + 63) >> 6;
  1812. return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
  1813. unhandled:
  1814. return s->mb_height-1;
  1815. }
  1816. /* put block[] to dest[] */
  1817. static inline void put_dct(MpegEncContext *s,
  1818. DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
  1819. {
  1820. s->dct_unquantize_intra(s, block, i, qscale);
  1821. s->dsp.idct_put (dest, line_size, block);
  1822. }
  1823. /* add block[] to dest[] */
  1824. static inline void add_dct(MpegEncContext *s,
  1825. DCTELEM *block, int i, uint8_t *dest, int line_size)
  1826. {
  1827. if (s->block_last_index[i] >= 0) {
  1828. s->dsp.idct_add (dest, line_size, block);
  1829. }
  1830. }
  1831. static inline void add_dequant_dct(MpegEncContext *s,
  1832. DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
  1833. {
  1834. if (s->block_last_index[i] >= 0) {
  1835. s->dct_unquantize_inter(s, block, i, qscale);
  1836. s->dsp.idct_add (dest, line_size, block);
  1837. }
  1838. }
  1839. /**
  1840. * Clean dc, ac, coded_block for the current non-intra MB.
  1841. */
  1842. void ff_clean_intra_table_entries(MpegEncContext *s)
  1843. {
  1844. int wrap = s->b8_stride;
  1845. int xy = s->block_index[0];
  1846. s->dc_val[0][xy ] =
  1847. s->dc_val[0][xy + 1 ] =
  1848. s->dc_val[0][xy + wrap] =
  1849. s->dc_val[0][xy + 1 + wrap] = 1024;
  1850. /* ac pred */
  1851. memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
  1852. memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
  1853. if (s->msmpeg4_version>=3) {
  1854. s->coded_block[xy ] =
  1855. s->coded_block[xy + 1 ] =
  1856. s->coded_block[xy + wrap] =
  1857. s->coded_block[xy + 1 + wrap] = 0;
  1858. }
  1859. /* chroma */
  1860. wrap = s->mb_stride;
  1861. xy = s->mb_x + s->mb_y * wrap;
  1862. s->dc_val[1][xy] =
  1863. s->dc_val[2][xy] = 1024;
  1864. /* ac pred */
  1865. memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
  1866. memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
  1867. s->mbintra_table[xy]= 0;
  1868. }
  1869. /* generic function called after a macroblock has been parsed by the
  1870. decoder or after it has been encoded by the encoder.
  1871. Important variables used:
  1872. s->mb_intra : true if intra macroblock
  1873. s->mv_dir : motion vector direction
  1874. s->mv_type : motion vector type
  1875. s->mv : motion vector
  1876. s->interlaced_dct : true if interlaced dct used (mpeg2)
  1877. */
  1878. static av_always_inline
  1879. void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
  1880. int is_mpeg12)
  1881. {
  1882. const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
  1883. if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
  1884. ff_xvmc_decode_mb(s);//xvmc uses pblocks
  1885. return;
  1886. }
  1887. if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  1888. /* save DCT coefficients */
  1889. int i,j;
  1890. DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
  1891. av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
  1892. for(i=0; i<6; i++){
  1893. for(j=0; j<64; j++){
  1894. *dct++ = block[i][s->dsp.idct_permutation[j]];
  1895. av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
  1896. }
  1897. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  1898. }
  1899. }
  1900. s->current_picture.f.qscale_table[mb_xy] = s->qscale;
  1901. /* update DC predictors for P macroblocks */
  1902. if (!s->mb_intra) {
  1903. if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
  1904. if(s->mbintra_table[mb_xy])
  1905. ff_clean_intra_table_entries(s);
  1906. } else {
  1907. s->last_dc[0] =
  1908. s->last_dc[1] =
  1909. s->last_dc[2] = 128 << s->intra_dc_precision;
  1910. }
  1911. }
  1912. else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
  1913. s->mbintra_table[mb_xy]=1;
  1914. if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
  1915. uint8_t *dest_y, *dest_cb, *dest_cr;
  1916. int dct_linesize, dct_offset;
  1917. op_pixels_func (*op_pix)[4];
  1918. qpel_mc_func (*op_qpix)[16];
  1919. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  1920. const int uvlinesize = s->current_picture.f.linesize[1];
  1921. const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
  1922. const int block_size = 8;
  1923. /* avoid copy if macroblock skipped in last frame too */
  1924. /* skip only during decoding as we might trash the buffers during encoding a bit */
  1925. if(!s->encoding){
  1926. uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
  1927. if (s->mb_skipped) {
  1928. s->mb_skipped= 0;
  1929. assert(s->pict_type!=AV_PICTURE_TYPE_I);
  1930. *mbskip_ptr = 1;
  1931. } else if(!s->current_picture.f.reference) {
  1932. *mbskip_ptr = 1;
  1933. } else{
  1934. *mbskip_ptr = 0; /* not skipped */
  1935. }
  1936. }
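/* With interlaced (field) DCT the two vertically adjacent luma blocks
 * cover alternating lines, so double the stride and place the second
 * block one line down instead of block_size lines. */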
  1937. dct_linesize = linesize << s->interlaced_dct;
  1938. dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
  1939. if(readable){
  1940. dest_y= s->dest[0];
  1941. dest_cb= s->dest[1];
  1942. dest_cr= s->dest[2];
  1943. }else{
  1944. dest_y = s->b_scratchpad;
  1945. dest_cb= s->b_scratchpad+16*linesize;
  1946. dest_cr= s->b_scratchpad+32*linesize;
  1947. }
  1948. if (!s->mb_intra) {
  1949. /* motion handling */
  1950. /* decoding or more than one mb_type (MC was already done otherwise) */
  1951. if(!s->encoding){
  1952. if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
  1953. if (s->mv_dir & MV_DIR_FORWARD) {
  1954. ff_thread_await_progress(&s->last_picture_ptr->f,
  1955. ff_MPV_lowest_referenced_row(s, 0),
  1956. 0);
  1957. }
  1958. if (s->mv_dir & MV_DIR_BACKWARD) {
  1959. ff_thread_await_progress(&s->next_picture_ptr->f,
  1960. ff_MPV_lowest_referenced_row(s, 1),
  1961. 0);
  1962. }
  1963. }
  1964. op_qpix= s->me.qpel_put;
  1965. if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
  1966. op_pix = s->dsp.put_pixels_tab;
  1967. }else{
  1968. op_pix = s->dsp.put_no_rnd_pixels_tab;
  1969. }
  1970. if (s->mv_dir & MV_DIR_FORWARD) {
  1971. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
  1972. op_pix = s->dsp.avg_pixels_tab;
  1973. op_qpix= s->me.qpel_avg;
  1974. }
  1975. if (s->mv_dir & MV_DIR_BACKWARD) {
  1976. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
  1977. }
  1978. }
  1979. /* skip dequant / idct if we are really late ;) */
  1980. if(s->avctx->skip_idct){
  1981. if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
  1982. ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
  1983. || s->avctx->skip_idct >= AVDISCARD_ALL)
  1984. goto skip_idct;
  1985. }
  1986. /* add dct residue */
  1987. if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
  1988. || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
  1989. add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  1990. add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  1991. add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  1992. add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  1993. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  1994. if (s->chroma_y_shift){
  1995. add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  1996. add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  1997. }else{
  1998. dct_linesize >>= 1;
  1999. dct_offset >>=1;
  2000. add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2001. add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2002. add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2003. add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2004. }
  2005. }
  2006. } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
  2007. add_dct(s, block[0], 0, dest_y , dct_linesize);
  2008. add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
  2009. add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
  2010. add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
  2011. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2012. if(s->chroma_y_shift){//Chroma420
  2013. add_dct(s, block[4], 4, dest_cb, uvlinesize);
  2014. add_dct(s, block[5], 5, dest_cr, uvlinesize);
  2015. }else{
  2016. //chroma422
  2017. dct_linesize = uvlinesize << s->interlaced_dct;
  2018. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
  2019. add_dct(s, block[4], 4, dest_cb, dct_linesize);
  2020. add_dct(s, block[5], 5, dest_cr, dct_linesize);
  2021. add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
  2022. add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
  2023. if(!s->chroma_x_shift){//Chroma444
  2024. add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
  2025. add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
  2026. add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
  2027. add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
  2028. }
  2029. }
  2030. }//fi gray
  2031. }
  2032. else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
  2033. ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
  2034. }
  2035. } else {
  2036. /* dct only in intra block */
  2037. if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
  2038. put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2039. put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2040. put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2041. put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2042. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2043. if(s->chroma_y_shift){
  2044. put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2045. put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2046. }else{
  2047. dct_offset >>=1;
  2048. dct_linesize >>=1;
  2049. put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2050. put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2051. put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2052. put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2053. }
  2054. }
  2055. }else{
  2056. s->dsp.idct_put(dest_y , dct_linesize, block[0]);
  2057. s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
  2058. s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
  2059. s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
  2060. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2061. if(s->chroma_y_shift){
  2062. s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
  2063. s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
  2064. }else{
  2065. dct_linesize = uvlinesize << s->interlaced_dct;
  2066. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
  2067. s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
  2068. s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
  2069. s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
  2070. s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
  2071. if(!s->chroma_x_shift){//Chroma444
  2072. s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
  2073. s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
  2074. s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
  2075. s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
  2076. }
  2077. }
  2078. }//gray
  2079. }
  2080. }
  2081. skip_idct:
  2082. if(!readable){
  2083. s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
  2084. s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
  2085. s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
  2086. }
  2087. }
  2088. }
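/* Unless optimizing for size, force two specializations of the inlined
 * body so all is_mpeg12 checks are resolved at compile time. */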
  2089. void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
  2090. #if !CONFIG_SMALL
  2091. if(s->out_format == FMT_MPEG1) {
  2092. MPV_decode_mb_internal(s, block, 1);
  2093. } else
  2094. #endif
  2095. MPV_decode_mb_internal(s, block, 0);
  2096. }
  2097. /**
2098. * @param h the normal height; it will be reduced automatically if needed for the last row
  2099. */
  2100. void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
  2101. const int field_pic= s->picture_structure != PICT_FRAME;
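/* Field pictures code only every other line; scale y and h up to frame
 * coordinates before drawing. */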
  2102. if(field_pic){
  2103. h <<= 1;
  2104. y <<= 1;
  2105. }
  2106. if (!s->avctx->hwaccel
  2107. && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
  2108. && s->unrestricted_mv
  2109. && s->current_picture.f.reference
  2110. && !s->intra_only
  2111. && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
  2112. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
  2113. int sides = 0, edge_h;
  2114. int hshift = desc->log2_chroma_w;
  2115. int vshift = desc->log2_chroma_h;
  2116. if (y==0) sides |= EDGE_TOP;
  2117. if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
  2118. edge_h= FFMIN(h, s->v_edge_pos - y);
  2119. s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
  2120. s->linesize, s->h_edge_pos, edge_h,
  2121. EDGE_WIDTH, EDGE_WIDTH, sides);
  2122. s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
  2123. s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
  2124. EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
  2125. s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
  2126. s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
  2127. EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
  2128. }
  2129. h= FFMIN(h, s->avctx->height - y);
  2130. if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
  2131. if (s->avctx->draw_horiz_band) {
  2132. AVFrame *src;
  2133. int offset[AV_NUM_DATA_POINTERS];
  2134. int i;
  2135. if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
  2136. src = &s->current_picture_ptr->f;
  2137. else if(s->last_picture_ptr)
  2138. src = &s->last_picture_ptr->f;
  2139. else
  2140. return;
  2141. if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
  2142. for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
  2143. offset[i] = 0;
  2144. }else{
  2145. offset[0]= y * s->linesize;
  2146. offset[1]=
  2147. offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
  2148. for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
  2149. offset[i] = 0;
  2150. }
  2151. emms_c();
  2152. s->avctx->draw_horiz_band(s->avctx, src, offset,
  2153. y, s->picture_structure, h);
  2154. }
  2155. }
  2156. void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
  2157. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  2158. const int uvlinesize = s->current_picture.f.linesize[1];
  2159. const int mb_size= 4;
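/* mb_size is log2 of the 16-pixel macroblock size, used as a shift when
 * converting macroblock coordinates to pixel offsets below. */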
  2160. s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
  2161. s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
  2162. s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
  2163. s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
  2164. s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2165. s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2166. //block_index is not used by mpeg2, so it is not affected by chroma_format
  2167. s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
  2168. s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2169. s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2170. if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
  2171. {
  2172. if(s->picture_structure==PICT_FRAME){
  2173. s->dest[0] += s->mb_y * linesize << mb_size;
  2174. s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2175. s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2176. }else{
  2177. s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
  2178. s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2179. s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2180. assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
  2181. }
  2182. }
  2183. }
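/**
 * Release all allocated picture buffers and reset the parse context;
 * typically reached via avcodec_flush_buffers(), e.g. after a seek.
 */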
  2184. void ff_mpeg_flush(AVCodecContext *avctx){
  2185. int i;
  2186. MpegEncContext *s = avctx->priv_data;
  2187. if(s==NULL || s->picture==NULL)
  2188. return;
  2189. for(i=0; i<s->picture_count; i++){
  2190. if (s->picture[i].f.data[0] &&
  2191. (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
  2192. s->picture[i].f.type == FF_BUFFER_TYPE_USER))
  2193. free_frame_buffer(s, &s->picture[i]);
  2194. }
  2195. s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
  2196. s->mb_x= s->mb_y= 0;
  2197. s->parse_context.state= -1;
  2198. s->parse_context.frame_start_found= 0;
  2199. s->parse_context.overread= 0;
  2200. s->parse_context.overread_index= 0;
  2201. s->parse_context.index= 0;
  2202. s->parse_context.last_index= 0;
  2203. s->bitstream_buffer_size=0;
  2204. s->pp_time=0;
  2205. }
  2206. static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
  2207. DCTELEM *block, int n, int qscale)
  2208. {
  2209. int i, level, nCoeffs;
  2210. const uint16_t *quant_matrix;
  2211. nCoeffs= s->block_last_index[n];
  2212. if (n < 4)
  2213. block[0] = block[0] * s->y_dc_scale;
  2214. else
  2215. block[0] = block[0] * s->c_dc_scale;
  2216. /* XXX: only mpeg1 */
  2217. quant_matrix = s->intra_matrix;
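/* MPEG-1 intra AC reconstruction: |F| = (|level| * qscale * W[j]) / 8,
 * then forced odd via (x - 1) | 1 (oddification / mismatch control). */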
  2218. for(i=1;i<=nCoeffs;i++) {
  2219. int j= s->intra_scantable.permutated[i];
  2220. level = block[j];
  2221. if (level) {
  2222. if (level < 0) {
  2223. level = -level;
  2224. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2225. level = (level - 1) | 1;
  2226. level = -level;
  2227. } else {
  2228. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2229. level = (level - 1) | 1;
  2230. }
  2231. block[j] = level;
  2232. }
  2233. }
  2234. }
  2235. static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
  2236. DCTELEM *block, int n, int qscale)
  2237. {
  2238. int i, level, nCoeffs;
  2239. const uint16_t *quant_matrix;
  2240. nCoeffs= s->block_last_index[n];
  2241. quant_matrix = s->inter_matrix;
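/* MPEG-1 inter reconstruction:
 * |F| = ((2 * |level| + 1) * qscale * W[j]) / 16,
 * again forced odd for mismatch control. */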
  2242. for(i=0; i<=nCoeffs; i++) {
  2243. int j= s->intra_scantable.permutated[i];
  2244. level = block[j];
  2245. if (level) {
  2246. if (level < 0) {
  2247. level = -level;
  2248. level = (((level << 1) + 1) * qscale *
  2249. ((int) (quant_matrix[j]))) >> 4;
  2250. level = (level - 1) | 1;
  2251. level = -level;
  2252. } else {
  2253. level = (((level << 1) + 1) * qscale *
  2254. ((int) (quant_matrix[j]))) >> 4;
  2255. level = (level - 1) | 1;
  2256. }
  2257. block[j] = level;
  2258. }
  2259. }
  2260. }
  2261. static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
  2262. DCTELEM *block, int n, int qscale)
  2263. {
  2264. int i, level, nCoeffs;
  2265. const uint16_t *quant_matrix;
  2266. if(s->alternate_scan) nCoeffs= 63;
  2267. else nCoeffs= s->block_last_index[n];
  2268. if (n < 4)
  2269. block[0] = block[0] * s->y_dc_scale;
  2270. else
  2271. block[0] = block[0] * s->c_dc_scale;
  2272. quant_matrix = s->intra_matrix;
  2273. for(i=1;i<=nCoeffs;i++) {
  2274. int j= s->intra_scantable.permutated[i];
  2275. level = block[j];
  2276. if (level) {
  2277. if (level < 0) {
  2278. level = -level;
  2279. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2280. level = -level;
  2281. } else {
  2282. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2283. }
  2284. block[j] = level;
  2285. }
  2286. }
  2287. }
  2288. static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
  2289. DCTELEM *block, int n, int qscale)
  2290. {
  2291. int i, level, nCoeffs;
  2292. const uint16_t *quant_matrix;
  2293. int sum=-1;
  2294. if(s->alternate_scan) nCoeffs= 63;
  2295. else nCoeffs= s->block_last_index[n];
  2296. if (n < 4)
  2297. block[0] = block[0] * s->y_dc_scale;
  2298. else
  2299. block[0] = block[0] * s->c_dc_scale;
  2300. quant_matrix = s->intra_matrix;
  2301. for(i=1;i<=nCoeffs;i++) {
  2302. int j= s->intra_scantable.permutated[i];
  2303. level = block[j];
  2304. if (level) {
  2305. if (level < 0) {
  2306. level = -level;
  2307. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2308. level = -level;
  2309. } else {
  2310. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2311. }
  2312. block[j] = level;
  2313. sum+=level;
  2314. }
  2315. }
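/* MPEG-2 mismatch control: toggle the LSB of the last coefficient
 * depending on the parity of the summed levels. */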
  2316. block[63]^=sum&1;
  2317. }
  2318. static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
  2319. DCTELEM *block, int n, int qscale)
  2320. {
  2321. int i, level, nCoeffs;
  2322. const uint16_t *quant_matrix;
  2323. int sum=-1;
  2324. if(s->alternate_scan) nCoeffs= 63;
  2325. else nCoeffs= s->block_last_index[n];
  2326. quant_matrix = s->inter_matrix;
  2327. for(i=0; i<=nCoeffs; i++) {
  2328. int j= s->intra_scantable.permutated[i];
  2329. level = block[j];
  2330. if (level) {
  2331. if (level < 0) {
  2332. level = -level;
  2333. level = (((level << 1) + 1) * qscale *
  2334. ((int) (quant_matrix[j]))) >> 4;
  2335. level = -level;
  2336. } else {
  2337. level = (((level << 1) + 1) * qscale *
  2338. ((int) (quant_matrix[j]))) >> 4;
  2339. }
  2340. block[j] = level;
  2341. sum+=level;
  2342. }
  2343. }
  2344. block[63]^=sum&1;
  2345. }
  2346. static void dct_unquantize_h263_intra_c(MpegEncContext *s,
  2347. DCTELEM *block, int n, int qscale)
  2348. {
  2349. int i, level, qmul, qadd;
  2350. int nCoeffs;
  2351. assert(s->block_last_index[n]>=0);
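/* H.263 reconstruction: |F| = 2 * qscale * |level| + qadd, where qadd
 * is qscale rounded down to an odd value; with AIC the offset is
 * omitted and the DC is left unscaled here. */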
  2352. qmul = qscale << 1;
  2353. if (!s->h263_aic) {
  2354. if (n < 4)
  2355. block[0] = block[0] * s->y_dc_scale;
  2356. else
  2357. block[0] = block[0] * s->c_dc_scale;
  2358. qadd = (qscale - 1) | 1;
  2359. }else{
  2360. qadd = 0;
  2361. }
  2362. if(s->ac_pred)
  2363. nCoeffs=63;
  2364. else
  2365. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  2366. for(i=1; i<=nCoeffs; i++) {
  2367. level = block[i];
  2368. if (level) {
  2369. if (level < 0) {
  2370. level = level * qmul - qadd;
  2371. } else {
  2372. level = level * qmul + qadd;
  2373. }
  2374. block[i] = level;
  2375. }
  2376. }
  2377. }
  2378. static void dct_unquantize_h263_inter_c(MpegEncContext *s,
  2379. DCTELEM *block, int n, int qscale)
  2380. {
  2381. int i, level, qmul, qadd;
  2382. int nCoeffs;
  2383. assert(s->block_last_index[n]>=0);
  2384. qadd = (qscale - 1) | 1;
  2385. qmul = qscale << 1;
  2386. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  2387. for(i=0; i<=nCoeffs; i++) {
  2388. level = block[i];
  2389. if (level) {
  2390. if (level < 0) {
  2391. level = level * qmul - qadd;
  2392. } else {
  2393. level = level * qmul + qadd;
  2394. }
  2395. block[i] = level;
  2396. }
  2397. }
  2398. }
  2399. /**
  2400. * set qscale and update qscale dependent variables.
  2401. */
  2402. void ff_set_qscale(MpegEncContext * s, int qscale)
  2403. {
  2404. if (qscale < 1)
  2405. qscale = 1;
  2406. else if (qscale > 31)
  2407. qscale = 31;
  2408. s->qscale = qscale;
  2409. s->chroma_qscale= s->chroma_qscale_table[qscale];
  2410. s->y_dc_scale= s->y_dc_scale_table[ qscale ];
  2411. s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
  2412. }
  2413. void ff_MPV_report_decode_progress(MpegEncContext *s)
  2414. {
  2415. if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
  2416. ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
  2417. }