/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "dsputil.h"
#include "h264chroma.h"
#include "internal.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "xvmc_internal.h"
#include "thread.h"
#include <limits.h>

static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale);

static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};

const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};

static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
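
/* The four entries above correspond to the MPEG-2 intra_dc_precision
 * values 0..3 (8 to 11 bits of DC precision), scaling the DC coefficient
 * by 8, 4, 2 and 1 respectively. */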

const enum AVPixelFormat ff_pixfmt_list_420[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};

static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->dsp.clear_blocks(s->block[0]);

    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    if (ref)
        av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
    ff_MPV_decode_mb(s, s->block);
}
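
/* Illustrative note: for 4:2:0 input chroma_x_shift == chroma_y_shift == 1,
 * so chroma advances 8 pixels per macroblock; e.g. the macroblock at
 * (mb_x, mb_y) = (2, 3) starts at byte offset 3 * 16 * linesize + 32 in
 * luma and 3 * 8 * uvlinesize + 16 in each chroma plane (8-bit samples). */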

/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (ARCH_ALPHA)
        ff_MPV_common_init_axp(s);
    if (ARCH_ARM)
        ff_MPV_common_init_arm(s);
    if (ARCH_BFIN)
        ff_MPV_common_init_bfin(s);
    if (ARCH_PPC)
        ff_MPV_common_init_ppc(s);
    if (ARCH_X86)
        ff_MPV_common_init_x86(s);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}

int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    // VC1 computes luma and chroma simultaneously and needs 19x19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
                      fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
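
/* Sizing example (illustrative): for linesize = 1920 the row allocation is
 * alloc_size = FFALIGN(1920 + 64, 32) = 1984 bytes, so edge_emu_buffer is
 * 1984 * 4 * 24 bytes; the 4 * 24 factor follows the note above
 * (linesize * interlaced * MB size, with 24x24 covering the worst case). */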

/**
 * Allocate a frame buffer
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r, ret;

    pic->tf.f = &pic->f;
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    else {
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
        pic->f.format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
    }

    if (r < 0 || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f.data[0]);
        return -1;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (!s->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
        return ret;
    }

    return 0;
}

static void free_picture_tables(Picture *pic)
{
    int i;

    pic->alloc_mb_width  =
    pic->alloc_mb_height = 0;

    av_buffer_unref(&pic->mb_var_buf);
    av_buffer_unref(&pic->mc_mb_var_buf);
    av_buffer_unref(&pic->mb_mean_buf);
    av_buffer_unref(&pic->mbskip_table_buf);
    av_buffer_unref(&pic->qscale_table_buf);
    av_buffer_unref(&pic->mb_type_buf);

    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
    }
}

static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;

    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    if (s->encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding ||
        (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    pic->alloc_mb_width  = s->mb_width;
    pic->alloc_mb_height = s->mb_height;

    return 0;
}
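
/* Size example (illustrative): a 1280x720 frame has mb_width = 80,
 * mb_height = 45 and mb_stride = 81, giving mb_array_size = 3645,
 * big_mb_num = 81 * 46 + 1 = 3727 and b8_array_size = 161 * 45 * 2 = 14490. */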

static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
        return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}

/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    int i, ret;

    if (pic->qscale_table_buf)
        if (   pic->alloc_mb_width  != s->mb_width
            || pic->alloc_mb_height != s->mb_height)
            free_picture_tables(pic);

    if (shared) {
        av_assert0(pic->f.data[0]);
        pic->shared = 1;
    } else {
        av_assert0(!pic->f.data[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (s->encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    pic->mbskip_table = pic->mbskip_table_buf->data;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    free_picture_tables(pic);
    return AVERROR(ENOMEM);
}

/**
 * Deallocate a picture.
 */
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = &pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else
        av_frame_unref(&pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}

static int update_picture_tables(Picture *dst, Picture *src)
{
    int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    dst->mb_var       = src->mb_var;
    dst->mc_mb_var    = src->mc_mb_var;
    dst->mb_mean      = src->mb_mean;
    dst->mbskip_table = src->mbskip_table;
    dst->qscale_table = src->qscale_table;
    dst->mb_type      = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    dst->alloc_mb_width  = src->alloc_mb_width;
    dst->alloc_mb_height = src->alloc_mb_height;

    return 0;
}

int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
{
    int ret;

    av_assert0(!dst->f.buf[0]);
    av_assert0(src->f.buf[0]);

    src->tf.f = &src->f;
    dst->tf.f = &dst->f;
    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        goto fail;

    ret = update_picture_tables(dst, src);
    if (ret < 0)
        goto fail;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    dst->field_picture = src->field_picture;
    dst->mb_var_sum    = src->mb_var_sum;
    dst->mc_mb_var_sum = src->mc_mb_var_sum;
    dst->b_frame_score = src->b_frame_score;
    dst->needs_realloc = src->needs_realloc;
    dst->reference     = src->reference;
    dst->shared        = src->shared;

    return 0;
fail:
    ff_mpeg_unref_picture(s, dst);
    return ret;
}

static int init_duplicate_context(MpegEncContext *s)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}

static void free_duplicate_context(MpegEncContext *s)
{
    if (s == NULL)
        return;

    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp       =
    s->rd_scratchpad =
    s->b_scratchpad  =
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}

static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}

int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (!dst->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for 1000 frames on 1GHz with 2 threads
    return 0;
}

int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized) {
            // s->picture_range_start += MAX_PICTURE_COUNT;
            // s->picture_range_end   += MAX_PICTURE_COUNT;
            if ((ret = ff_MPV_common_init(s)) < 0) {
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return ret;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_mpeg_unref_picture(s, &s->picture[i]);
            if (s1->picture[i].f.data[0] &&
                (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
                return ret;
        }

#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s, &s->pic);\
    if (s1->pic.f.data[0])\
        ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
    else\
        ret = update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG4 timing info
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize-dependent scratch buffer allocation
    if (!s->edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}

/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_MPV_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table      =
    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
    s->progressive_frame     = 1;
    s->progressive_sequence  = 1;
    s->picture_structure     = PICT_FRAME;

    s->coded_picture_number  = 0;
    s->picture_number        = 0;
    s->input_picture_number  = 0;

    s->picture_in_gop_number = 0;

    s->f_code                = 1;
    s->b_code                = 1;

    s->slice_context_count   = 1;
}

/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    ff_MPV_common_defaults(s);
}

static int init_er(MpegEncContext *s)
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx       = s->avctx;
    er->dsp         = &s->dsp;

    er->mb_index2xy = s->mb_index2xy;
    er->mb_num      = s->mb_num;
    er->mb_width    = s->mb_width;
    er->mb_height   = s->mb_height;
    er->mb_stride   = s->mb_stride;
    er->b8_stride   = s->b8_stride;

    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    er->mbskip_table  = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    er->decode_mb = mpeg_er_decode_mb;
    er->opaque    = s;

    return 0;
fail:
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
}

/**
 * Initialize and allocate MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
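
    /* Geometry example (illustrative): 720x576 video gives mb_width = 45,
     * mb_stride = 46, b8_stride = 91 and b4_stride = 181; with
     * mb_height = 36 this makes mb_array_size = 1656 and
     * mv_table_size = 38 * 46 + 1 = 1749. */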

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);
    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,      mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which MBs are intra blocks */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker mpeg4 slice_end detection

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}

/**
 * Init common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 */
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
    int i;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;
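
    /* Example (illustrative): an interlaced MPEG-2 sequence of height 577
     * gets mb_height = (577 + 31) / 32 * 2 = 38, always an even number of
     * macroblock rows, while the progressive formula would give
     * (577 + 15) / 16 = 37. */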

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);

    /* convert fourcc to upper case */
    s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
    s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);

    s->avctx->coded_frame = &s->current_picture.f;

    if (s->encoding) {
        if (s->msmpeg4_version) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                              2 * 2 * (MAX_LEVEL + 1) *
                              (MAX_RUN + 1) * 2 * sizeof(int), fail);
        }
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,          64 * 32 * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix,   64 * 32 * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,          64 * 32 * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,           MAX_PICTURE_COUNT * sizeof(Picture *), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)

        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
        }
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        avcodec_get_frame_defaults(&s->picture[i].f);
    }
    memset(&s->next_picture, 0, sizeof(s->next_picture));
    memset(&s->last_picture, 0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    avcodec_get_frame_defaults(&s->next_picture.f);
    avcodec_get_frame_defaults(&s->last_picture.f);
    avcodec_get_frame_defaults(&s->current_picture.f);

    if (init_context_frame(s))
        goto fail;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

//     if (s->width && s->height) {
    if (nb_slices > 1) {
        for (i = 1; i < nb_slices; i++) {
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        }

        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i]) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
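        /* Partition example (illustrative): mb_height = 36 over
         * nb_slices = 4 yields the row ranges [0,9), [9,18), [18,27) and
         * [27,36); the "+ nb_slices / 2" term rounds to the nearest row so
         * the slices stay balanced when mb_height is not divisible by the
         * slice count. */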
    } else {
        if (init_duplicate_context(s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;
//     }

    return 0;
 fail:
    ff_MPV_common_end(s);
    return -1;
}

/**
 * Free and reset MpegEncContext fields depending on the resolution.
 * Used during resolution changes to avoid a full reinitialization of the
 * codec.
 */
static int free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;

    return 0;
}

int ff_MPV_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else
        free_duplicate_context(s);

    if ((err = free_context_frame(s)) < 0)
        return err;

    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR_INVALIDDATA;

    if ((err = init_context_frame(s)))
        goto fail;

    s->thread_context[0] = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i]) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            err = init_duplicate_context(s);
            if (err < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
 fail:
    ff_MPV_common_end(s);
    return err;
}

/* init common structure for both encoder and decoder */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else
        free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    if (s->q_chroma_intra_matrix   != s->q_intra_matrix)
        av_freep(&s->q_chroma_intra_matrix);
    if (s->q_chroma_intra_matrix16 != s->q_intra_matrix16)
        av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix   = NULL;
    s->q_chroma_intra_matrix16 = NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s, &s->current_picture);
    free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);
    free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s, &s->new_picture);

    free_context_frame(s);

    s->context_initialized = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}

av_cold void ff_init_rl(RLTable *rl,
                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end   = rl->last;
        } else {
            start = rl->last;
            end   = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}

av_cold void ff_init_vlc_rl(RLTable *rl)
{
    int i, q;

    for (q = 0; q < 32; q++) {
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        if (q == 0) {
            qmul = 1;
            qadd = 0;
        }
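
        /* Worked example (illustrative): qmul = 2 * q and qadd = (q - 1) | 1
         * implement the H.263-style dequantizer |rec| = q * (2 * |level| + 1),
         * minus one when q is even: q = 5 gives 10 * |level| + 5 and
         * q = 6 gives 12 * |level| + 5. */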
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len  = rl->vlc.table[i][1];
            int level, run;

            if (len == 0) { // illegal code
                run   = 66;
                level = MAX_LEVEL;
            } else if (len < 0) { // more bits needed
                run   = 0;
                level = code;
            } else {
                if (code == rl->n) { // esc
                    run   = 66;
                    level = 0;
                } else {
                    run   = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    if (code >= rl->last) run += 192;
                }
            }
            rl->rl_vlc[q][i].len   = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run   = run;
        }
    }
}

void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
{
    int i;

    /* release non-reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference &&
            (remove_current || &s->picture[i] != s->current_picture_ptr)) {
            ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }
}

static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
{
    if (pic == s->last_picture_ptr)
        return 0;
    if (pic->f.data[0] == NULL)
        return 1;
    if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
        return 1;
    return 0;
}

static int find_unused_picture(MpegEncContext *s, int shared)
{
    int i;

    if (shared) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
                return i;
        }
    } else {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (pic_is_unused(s, &s->picture[i]))
                return i;
        }
    }

    av_log(s->avctx, AV_LOG_FATAL,
           "Internal error, picture buffer overflow\n");
    /* We could return -1, but the codec would crash trying to draw into a
     * non-existing frame anyway. This is safer than waiting for a random crash.
     * Also the return of this is never useful, an encoder must only allocate
     * as much as allowed in the specification. This has no relationship to how
     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
     * enough for such valid streams).
     * Plus, a decoder has to check stream validity and remove frames if too
     * many reference frames are around. Waiting for "OOM" is not correct at
     * all. Similarly, missing reference frames have to be replaced by
     * interpolated/MC frames, anything else is a bug in the codec ...
     */
    abort();
    return -1;
}

int ff_find_unused_picture(MpegEncContext *s, int shared)
{
    int ret = find_unused_picture(s, shared);

    if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
        if (s->picture[ret].needs_realloc) {
            s->picture[ret].needs_realloc = 0;
            free_picture_tables(&s->picture[ret]);
            ff_mpeg_unref_picture(s, &s->picture[ret]);
            avcodec_get_frame_defaults(&s->picture[ret].f);
        }
    }
    return ret;
}

static void update_noise_reduction(MpegEncContext *s)
{
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        for (i = 0; i < 64; i++) {
            s->dct_offset[intra][i] = (s->avctx->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
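
/* Numeric example for the offset formula above (illustrative): with
 * noise_reduction = 256, dct_count[intra] = 1000 and
 * dct_error_sum[intra][i] = 50000, the offset is
 * (256 * 1000 + 25000) / 50001 = 5, so coefficients with a larger average
 * error history get a larger offset. */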

/**
 * generic function for encode/decode called after coding/decoding
 * the header and before a frame is coded/decoded.
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f.data[0]) {
        ff_mpeg_unref_picture(s, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (mpeg124/h263) */
    if (!s->encoding) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (&s->picture[i] != s->last_picture_ptr &&
                &s->picture[i] != s->next_picture_ptr &&
                s->picture[i].reference && !s->picture[i].needs_realloc) {
                if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                    av_log(avctx, AV_LOG_ERROR,
                           "releasing zombie picture\n");
                ff_mpeg_unref_picture(s, &s->picture[i]);
            }
        }
    }

    ff_mpeg_unref_picture(s, &s->current_picture);

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.data[0] == NULL) {
            // we already have an unused image
            // (maybe it was set before reading the header)
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            pic = &s->picture[i];
        }

        pic->reference = 0;
        if (!s->droppable) {
            if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->reference = 3;
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        // FIXME use only the vars from current_pic
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture      = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr, s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    if ((s->last_picture_ptr == NULL ||
         s->last_picture_ptr->f.data[0] == NULL) &&
        (s->pict_type != AV_PICTURE_TYPE_I ||
         s->picture_structure != PICT_FRAME)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.data[0])
            av_log(avctx, AV_LOG_INFO,
                   "allocating dummy last picture for B frame\n");
        else if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");
        else if (s->picture_structure != PICT_FRAME)
            av_log(avctx, AV_LOG_INFO,
                   "allocate dummy last picture for field based first keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];
        s->last_picture_ptr->f.key_frame = 0;
        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        memset(s->last_picture_ptr->f.data[0], 0x80,
               avctx->height * s->last_picture_ptr->f.linesize[0]);
        memset(s->last_picture_ptr->f.data[1], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[1]);
        memset(s->last_picture_ptr->f.data[2], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[2]);

        if (s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263) {
            for (i = 0; i < avctx->height; i++)
                memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0] * i,
                       16, avctx->width);
        }

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    if ((s->next_picture_ptr == NULL ||
         s->next_picture_ptr->f.data[0] == NULL) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];
        s->next_picture_ptr->f.key_frame = 0;
        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

#if 0 // BUFREF-FIXME
    memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
    memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
#endif
    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->last_picture);
        if (s->last_picture_ptr->f.data[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->next_picture);
        if (s->next_picture_ptr->f.data[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                     s->last_picture_ptr->f.data[0]));

    if (s->picture_structure != PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }
  1454. s->err_recognition = avctx->err_recognition;
1455. /* Set the dequantizer: we cannot do it during init, since it may
1456. * still change for MPEG-4, and we cannot do it during header decoding,
1457. * since init has not been called for MPEG-4 at that point yet. */
  1458. if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
  1459. s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
  1460. s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
  1461. } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
  1462. s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
  1463. s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
  1464. } else {
  1465. s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
  1466. s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
  1467. }
  1468. if (s->dct_error_sum) {
  1469. av_assert2(s->avctx->noise_reduction && s->encoding);
  1470. update_noise_reduction(s);
  1471. }
  1472. if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
  1473. return ff_xvmc_field_start(s, avctx);
  1474. return 0;
  1475. }
  1476. /* generic function for encode/decode called after a
  1477. * frame has been coded/decoded. */
  1478. void ff_MPV_frame_end(MpegEncContext *s)
  1479. {
  1480. /* redraw edges for the frame if decoding didn't complete */
  1481. // just to make sure that all data is rendered.
  1482. if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
  1483. ff_xvmc_field_end(s);
  1484. } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
  1485. !s->avctx->hwaccel &&
  1486. !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
  1487. s->unrestricted_mv &&
  1488. s->current_picture.reference &&
  1489. !s->intra_only &&
  1490. !(s->flags & CODEC_FLAG_EMU_EDGE) &&
  1491. !s->avctx->lowres
  1492. ) {
  1493. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
  1494. int hshift = desc->log2_chroma_w;
  1495. int vshift = desc->log2_chroma_h;
  1496. s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
  1497. s->h_edge_pos, s->v_edge_pos,
  1498. EDGE_WIDTH, EDGE_WIDTH,
  1499. EDGE_TOP | EDGE_BOTTOM);
  1500. s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
  1501. s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
  1502. EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
  1503. EDGE_TOP | EDGE_BOTTOM);
  1504. s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
  1505. s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
  1506. EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
  1507. EDGE_TOP | EDGE_BOTTOM);
  1508. }
  1509. emms_c();
  1510. s->last_pict_type = s->pict_type;
  1511. s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
  1512. if (s->pict_type!= AV_PICTURE_TYPE_B) {
  1513. s->last_non_b_pict_type = s->pict_type;
  1514. }
  1515. #if 0
  1516. /* copy back current_picture variables */
  1517. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1518. if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
  1519. s->picture[i] = s->current_picture;
  1520. break;
  1521. }
  1522. }
  1523. av_assert0(i < MAX_PICTURE_COUNT);
  1524. #endif
  1525. // clear copies, to avoid confusion
  1526. #if 0
  1527. memset(&s->last_picture, 0, sizeof(Picture));
  1528. memset(&s->next_picture, 0, sizeof(Picture));
  1529. memset(&s->current_picture, 0, sizeof(Picture));
  1530. #endif
  1531. s->avctx->coded_frame = &s->current_picture_ptr->f;
  1532. if (s->current_picture.reference)
  1533. ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
  1534. }
  1535. /**
  1536. * Draw a line from (ex, ey) -> (sx, sy).
  1537. * @param w width of the image
  1538. * @param h height of the image
  1539. * @param stride stride/linesize of the image
1540. * @param color color of the line
  1541. */
  1542. static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
  1543. int w, int h, int stride, int color)
  1544. {
  1545. int x, y, fr, f;
  1546. sx = av_clip(sx, 0, w - 1);
  1547. sy = av_clip(sy, 0, h - 1);
  1548. ex = av_clip(ex, 0, w - 1);
  1549. ey = av_clip(ey, 0, h - 1);
  1550. buf[sy * stride + sx] += color;
  1551. if (FFABS(ex - sx) > FFABS(ey - sy)) {
  1552. if (sx > ex) {
  1553. FFSWAP(int, sx, ex);
  1554. FFSWAP(int, sy, ey);
  1555. }
  1556. buf += sx + sy * stride;
  1557. ex -= sx;
  1558. f = ((ey - sy) << 16) / ex;
  1559. for (x = 0; x <= ex; x++) {
  1560. y = (x * f) >> 16;
  1561. fr = (x * f) & 0xFFFF;
  1562. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1563. if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
  1564. }
  1565. } else {
  1566. if (sy > ey) {
  1567. FFSWAP(int, sx, ex);
  1568. FFSWAP(int, sy, ey);
  1569. }
  1570. buf += sx + sy * stride;
  1571. ey -= sy;
  1572. if (ey)
  1573. f = ((ex - sx) << 16) / ey;
  1574. else
  1575. f = 0;
  1576. for(y= 0; y <= ey; y++){
  1577. x = (y*f) >> 16;
  1578. fr = (y*f) & 0xFFFF;
  1579. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1580. if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
  1581. }
  1582. }
  1583. }
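/* Worked example of the 16.16 fixed-point interpolation above: for
 * sx=0, sy=0, ex=4, ey=1 the slope is f = (1 << 16) / 4 = 0x4000; at
 * x = 2 the minor-axis position is 0x8000, so y = 0 and fr = 0x8000,
 * and the color is split evenly between rows 0 and 1, giving a cheap
 * form of anti-aliasing. */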
  1584. /**
  1585. * Draw an arrow from (ex, ey) -> (sx, sy).
  1586. * @param w width of the image
  1587. * @param h height of the image
  1588. * @param stride stride/linesize of the image
  1589. * @param color color of the arrow
  1590. */
  1591. static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
  1592. int ey, int w, int h, int stride, int color)
  1593. {
  1594. int dx,dy;
  1595. sx = av_clip(sx, -100, w + 100);
  1596. sy = av_clip(sy, -100, h + 100);
  1597. ex = av_clip(ex, -100, w + 100);
  1598. ey = av_clip(ey, -100, h + 100);
  1599. dx = ex - sx;
  1600. dy = ey - sy;
  1601. if (dx * dx + dy * dy > 3 * 3) {
  1602. int rx = dx + dy;
  1603. int ry = -dx + dy;
  1604. int length = ff_sqrt((rx * rx + ry * ry) << 8);
  1605. // FIXME subpixel accuracy
  1606. rx = ROUNDED_DIV(rx * 3 << 4, length);
  1607. ry = ROUNDED_DIV(ry * 3 << 4, length);
  1608. draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
  1609. draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
  1610. }
  1611. draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
  1612. }
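/* The arrowhead above is built from (dx, dy) rotated by +/-45 degrees:
 * (rx, ry) = (dx + dy, dy - dx) and its perpendicular (-ry, rx). Since
 * length = ff_sqrt((rx*rx + ry*ry) << 8) is roughly 16 * |r|, the two
 * barbs come out normalised to about 3 pixels and are drawn at the
 * (sx, sy) end of the shaft. */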
  1613. /**
  1614. * Print debugging info for the given picture.
  1615. */
  1616. void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
  1617. int *low_delay,
  1618. int mb_width, int mb_height, int mb_stride, int quarter_sample)
  1619. {
  1620. if (avctx->hwaccel || !p || !p->mb_type
  1621. || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
  1622. return;
  1623. if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
  1624. int x,y;
  1625. av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
  1626. av_get_picture_type_char(pict->pict_type));
  1627. for (y = 0; y < mb_height; y++) {
  1628. for (x = 0; x < mb_width; x++) {
  1629. if (avctx->debug & FF_DEBUG_SKIP) {
  1630. int count = mbskip_table[x + y * mb_stride];
  1631. if (count > 9)
  1632. count = 9;
  1633. av_log(avctx, AV_LOG_DEBUG, "%1d", count);
  1634. }
  1635. if (avctx->debug & FF_DEBUG_QP) {
  1636. av_log(avctx, AV_LOG_DEBUG, "%2d",
  1637. p->qscale_table[x + y * mb_stride]);
  1638. }
  1639. if (avctx->debug & FF_DEBUG_MB_TYPE) {
  1640. int mb_type = p->mb_type[x + y * mb_stride];
  1641. // Type & MV direction
  1642. if (IS_PCM(mb_type))
  1643. av_log(avctx, AV_LOG_DEBUG, "P");
  1644. else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
  1645. av_log(avctx, AV_LOG_DEBUG, "A");
  1646. else if (IS_INTRA4x4(mb_type))
  1647. av_log(avctx, AV_LOG_DEBUG, "i");
  1648. else if (IS_INTRA16x16(mb_type))
  1649. av_log(avctx, AV_LOG_DEBUG, "I");
  1650. else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
  1651. av_log(avctx, AV_LOG_DEBUG, "d");
  1652. else if (IS_DIRECT(mb_type))
  1653. av_log(avctx, AV_LOG_DEBUG, "D");
  1654. else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
  1655. av_log(avctx, AV_LOG_DEBUG, "g");
  1656. else if (IS_GMC(mb_type))
  1657. av_log(avctx, AV_LOG_DEBUG, "G");
  1658. else if (IS_SKIP(mb_type))
  1659. av_log(avctx, AV_LOG_DEBUG, "S");
  1660. else if (!USES_LIST(mb_type, 1))
  1661. av_log(avctx, AV_LOG_DEBUG, ">");
  1662. else if (!USES_LIST(mb_type, 0))
  1663. av_log(avctx, AV_LOG_DEBUG, "<");
  1664. else {
  1665. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1666. av_log(avctx, AV_LOG_DEBUG, "X");
  1667. }
  1668. // segmentation
  1669. if (IS_8X8(mb_type))
  1670. av_log(avctx, AV_LOG_DEBUG, "+");
  1671. else if (IS_16X8(mb_type))
  1672. av_log(avctx, AV_LOG_DEBUG, "-");
  1673. else if (IS_8X16(mb_type))
  1674. av_log(avctx, AV_LOG_DEBUG, "|");
  1675. else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
  1676. av_log(avctx, AV_LOG_DEBUG, " ");
  1677. else
  1678. av_log(avctx, AV_LOG_DEBUG, "?");
  1679. if (IS_INTERLACED(mb_type))
  1680. av_log(avctx, AV_LOG_DEBUG, "=");
  1681. else
  1682. av_log(avctx, AV_LOG_DEBUG, " ");
  1683. }
  1684. }
  1685. av_log(avctx, AV_LOG_DEBUG, "\n");
  1686. }
  1687. }
  1688. if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
  1689. (avctx->debug_mv)) {
  1690. const int shift = 1 + quarter_sample;
  1691. int mb_y;
  1692. uint8_t *ptr;
  1693. int i;
  1694. int h_chroma_shift, v_chroma_shift, block_height;
  1695. const int width = avctx->width;
  1696. const int height = avctx->height;
  1697. const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
  1698. const int mv_stride = (mb_width << mv_sample_log2) +
  1699. (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
  1700. *low_delay = 0; // needed to see the vectors without trashing the buffers
  1701. avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
  1702. av_frame_make_writable(pict);
  1703. pict->opaque = NULL;
  1704. ptr = pict->data[0];
  1705. block_height = 16 >> v_chroma_shift;
  1706. for (mb_y = 0; mb_y < mb_height; mb_y++) {
  1707. int mb_x;
  1708. for (mb_x = 0; mb_x < mb_width; mb_x++) {
  1709. const int mb_index = mb_x + mb_y * mb_stride;
  1710. if ((avctx->debug_mv) && p->motion_val[0]) {
  1711. int type;
  1712. for (type = 0; type < 3; type++) {
  1713. int direction = 0;
  1714. switch (type) {
  1715. case 0:
  1716. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
  1717. (pict->pict_type!= AV_PICTURE_TYPE_P))
  1718. continue;
  1719. direction = 0;
  1720. break;
  1721. case 1:
  1722. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
  1723. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1724. continue;
  1725. direction = 0;
  1726. break;
  1727. case 2:
  1728. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
  1729. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1730. continue;
  1731. direction = 1;
  1732. break;
  1733. }
  1734. if (!USES_LIST(p->mb_type[mb_index], direction))
  1735. continue;
  1736. if (IS_8X8(p->mb_type[mb_index])) {
  1737. int i;
  1738. for (i = 0; i < 4; i++) {
  1739. int sx = mb_x * 16 + 4 + 8 * (i & 1);
  1740. int sy = mb_y * 16 + 4 + 8 * (i >> 1);
  1741. int xy = (mb_x * 2 + (i & 1) +
  1742. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1743. int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
  1744. int my = (p->motion_val[direction][xy][1] >> shift) + sy;
  1745. draw_arrow(ptr, sx, sy, mx, my, width,
  1746. height, pict->linesize[0], 100);
  1747. }
  1748. } else if (IS_16X8(p->mb_type[mb_index])) {
  1749. int i;
  1750. for (i = 0; i < 2; i++) {
  1751. int sx = mb_x * 16 + 8;
  1752. int sy = mb_y * 16 + 4 + 8 * i;
  1753. int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
  1754. int mx = (p->motion_val[direction][xy][0] >> shift);
  1755. int my = (p->motion_val[direction][xy][1] >> shift);
  1756. if (IS_INTERLACED(p->mb_type[mb_index]))
  1757. my *= 2;
  1758. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1759. height, pict->linesize[0], 100);
  1760. }
  1761. } else if (IS_8X16(p->mb_type[mb_index])) {
  1762. int i;
  1763. for (i = 0; i < 2; i++) {
  1764. int sx = mb_x * 16 + 4 + 8 * i;
  1765. int sy = mb_y * 16 + 8;
  1766. int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
  1767. int mx = p->motion_val[direction][xy][0] >> shift;
  1768. int my = p->motion_val[direction][xy][1] >> shift;
  1769. if (IS_INTERLACED(p->mb_type[mb_index]))
  1770. my *= 2;
  1771. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1772. height, pict->linesize[0], 100);
  1773. }
  1774. } else {
  1775. int sx= mb_x * 16 + 8;
  1776. int sy= mb_y * 16 + 8;
  1777. int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
  1778. int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
  1779. int my= (p->motion_val[direction][xy][1]>>shift) + sy;
  1780. draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
  1781. }
  1782. }
  1783. }
  1784. if ((avctx->debug & FF_DEBUG_VIS_QP)) {
  1785. uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
  1786. 0x0101010101010101ULL;
  1787. int y;
  1788. for (y = 0; y < block_height; y++) {
  1789. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1790. (block_height * mb_y + y) *
  1791. pict->linesize[1]) = c;
  1792. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1793. (block_height * mb_y + y) *
  1794. pict->linesize[2]) = c;
  1795. }
  1796. }
  1797. if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
  1798. p->motion_val[0]) {
  1799. int mb_type = p->mb_type[mb_index];
  1800. uint64_t u,v;
  1801. int y;
  1802. #define COLOR(theta, r) \
  1803. u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
  1804. v = (int)(128 + r * sin(theta * 3.141592 / 180));
  1805. u = v = 128;
  1806. if (IS_PCM(mb_type)) {
  1807. COLOR(120, 48)
  1808. } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
  1809. IS_INTRA16x16(mb_type)) {
  1810. COLOR(30, 48)
  1811. } else if (IS_INTRA4x4(mb_type)) {
  1812. COLOR(90, 48)
  1813. } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
  1814. // COLOR(120, 48)
  1815. } else if (IS_DIRECT(mb_type)) {
  1816. COLOR(150, 48)
  1817. } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
  1818. COLOR(170, 48)
  1819. } else if (IS_GMC(mb_type)) {
  1820. COLOR(190, 48)
  1821. } else if (IS_SKIP(mb_type)) {
  1822. // COLOR(180, 48)
  1823. } else if (!USES_LIST(mb_type, 1)) {
  1824. COLOR(240, 48)
  1825. } else if (!USES_LIST(mb_type, 0)) {
  1826. COLOR(0, 48)
  1827. } else {
  1828. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1829. COLOR(300,48)
  1830. }
  1831. u *= 0x0101010101010101ULL;
  1832. v *= 0x0101010101010101ULL;
  1833. for (y = 0; y < block_height; y++) {
  1834. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1835. (block_height * mb_y + y) * pict->linesize[1]) = u;
  1836. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1837. (block_height * mb_y + y) * pict->linesize[2]) = v;
  1838. }
  1839. // segmentation
  1840. if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
  1841. *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
  1842. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1843. *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
  1844. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1845. }
  1846. if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
  1847. for (y = 0; y < 16; y++)
  1848. pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
  1849. pict->linesize[0]] ^= 0x80;
  1850. }
  1851. if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
  1852. int dm = 1 << (mv_sample_log2 - 2);
  1853. for (i = 0; i < 4; i++) {
  1854. int sx = mb_x * 16 + 8 * (i & 1);
  1855. int sy = mb_y * 16 + 8 * (i >> 1);
  1856. int xy = (mb_x * 2 + (i & 1) +
  1857. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1858. // FIXME bidir
  1859. int32_t *mv = (int32_t *) &p->motion_val[0][xy];
  1860. if (mv[0] != mv[dm] ||
  1861. mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
  1862. for (y = 0; y < 8; y++)
  1863. pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
  1864. if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
  1865. *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
  1866. pict->linesize[0]) ^= 0x8080808080808080ULL;
  1867. }
  1868. }
  1869. if (IS_INTERLACED(mb_type) &&
  1870. avctx->codec->id == AV_CODEC_ID_H264) {
  1871. // hmm
  1872. }
  1873. }
  1874. mbskip_table[mb_index] = 0;
  1875. }
  1876. }
  1877. }
  1878. }
  1879. void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
  1880. {
  1881. ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
  1882. s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
  1883. }
  1884. int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
  1885. {
  1886. AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
  1887. int offset = 2*s->mb_stride + 1;
  1888. if(!ref)
  1889. return AVERROR(ENOMEM);
  1890. av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
  1891. ref->size -= offset;
  1892. ref->data += offset;
  1893. return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
  1894. }
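/* A minimal consumer sketch for the table exported above, via the
 * public av_frame_get_qp_table() accessor; the variable names are
 * illustrative and use_qp() is a hypothetical sink: */
#if 0
    int qstride, type;
    int8_t *qp = av_frame_get_qp_table(frame, &qstride, &type);
    if (qp) {
        int mb_x, mb_y;
        for (mb_y = 0; mb_y < (frame->height + 15) / 16; mb_y++)
            for (mb_x = 0; mb_x < (frame->width + 15) / 16; mb_x++)
                use_qp(qp[mb_x + mb_y * qstride]);
    }
#endif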
  1895. static inline int hpel_motion_lowres(MpegEncContext *s,
  1896. uint8_t *dest, uint8_t *src,
  1897. int field_based, int field_select,
  1898. int src_x, int src_y,
  1899. int width, int height, int stride,
  1900. int h_edge_pos, int v_edge_pos,
  1901. int w, int h, h264_chroma_mc_func *pix_op,
  1902. int motion_x, int motion_y)
  1903. {
  1904. const int lowres = s->avctx->lowres;
  1905. const int op_index = FFMIN(lowres, 3);
  1906. const int s_mask = (2 << lowres) - 1;
  1907. int emu = 0;
  1908. int sx, sy;
  1909. if (s->quarter_sample) {
  1910. motion_x /= 2;
  1911. motion_y /= 2;
  1912. }
  1913. sx = motion_x & s_mask;
  1914. sy = motion_y & s_mask;
1915. src_x += motion_x >> (lowres + 1);
1916. src_y += motion_y >> (lowres + 1);
  1917. src += src_y * stride + src_x;
  1918. if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
  1919. (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  1920. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
  1921. (h + 1) << field_based, src_x,
  1922. src_y << field_based,
  1923. h_edge_pos,
  1924. v_edge_pos);
  1925. src = s->edge_emu_buffer;
  1926. emu = 1;
  1927. }
  1928. sx = (sx << 2) >> lowres;
  1929. sy = (sy << 2) >> lowres;
  1930. if (field_select)
  1931. src += s->linesize;
  1932. pix_op[op_index](dest, src, stride, h, sx, sy);
  1933. return emu;
  1934. }
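/* Worked example for the sub-pel bookkeeping above: with lowres=1 and
 * motion_x = 5 (half-pel units), s_mask = (2 << 1) - 1 = 3 gives
 * sx = 5 & 3 = 1 and an integer step of 5 >> 2 = 1 lowres pixel;
 * sx is then rescaled as (1 << 2) >> 1 = 2, the 1/8-pel phase that the
 * h264 chroma MC functions expect. */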
  1935. /* apply one mpeg motion vector to the three components */
  1936. static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
  1937. uint8_t *dest_y,
  1938. uint8_t *dest_cb,
  1939. uint8_t *dest_cr,
  1940. int field_based,
  1941. int bottom_field,
  1942. int field_select,
  1943. uint8_t **ref_picture,
  1944. h264_chroma_mc_func *pix_op,
  1945. int motion_x, int motion_y,
  1946. int h, int mb_y)
  1947. {
  1948. uint8_t *ptr_y, *ptr_cb, *ptr_cr;
  1949. int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
  1950. uvsx, uvsy;
  1951. const int lowres = s->avctx->lowres;
  1952. const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
  1953. const int block_s = 8>>lowres;
  1954. const int s_mask = (2 << lowres) - 1;
  1955. const int h_edge_pos = s->h_edge_pos >> lowres;
  1956. const int v_edge_pos = s->v_edge_pos >> lowres;
  1957. linesize = s->current_picture.f.linesize[0] << field_based;
  1958. uvlinesize = s->current_picture.f.linesize[1] << field_based;
  1959. // FIXME obviously not perfect but qpel will not work in lowres anyway
  1960. if (s->quarter_sample) {
  1961. motion_x /= 2;
  1962. motion_y /= 2;
  1963. }
  1964. if(field_based){
  1965. motion_y += (bottom_field - field_select)*((1 << lowres)-1);
  1966. }
  1967. sx = motion_x & s_mask;
  1968. sy = motion_y & s_mask;
1969. src_x = s->mb_x * 2 * block_s + (motion_x >> (lowres + 1));
1970. src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> (lowres + 1));
  1971. if (s->out_format == FMT_H263) {
  1972. uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
  1973. uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
  1974. uvsrc_x = src_x >> 1;
  1975. uvsrc_y = src_y >> 1;
  1976. } else if (s->out_format == FMT_H261) {
1977. // in H.261, chroma MVs are always even, i.e. full-pel
  1978. mx = motion_x / 4;
  1979. my = motion_y / 4;
  1980. uvsx = (2 * mx) & s_mask;
  1981. uvsy = (2 * my) & s_mask;
  1982. uvsrc_x = s->mb_x * block_s + (mx >> lowres);
  1983. uvsrc_y = mb_y * block_s + (my >> lowres);
  1984. } else {
  1985. if(s->chroma_y_shift){
  1986. mx = motion_x / 2;
  1987. my = motion_y / 2;
  1988. uvsx = mx & s_mask;
  1989. uvsy = my & s_mask;
1990. uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
1991. uvsrc_y = (mb_y * block_s >> field_based) + (my >> (lowres + 1));
  1992. } else {
  1993. if(s->chroma_x_shift){
  1994. //Chroma422
  1995. mx = motion_x / 2;
  1996. uvsx = mx & s_mask;
  1997. uvsy = motion_y & s_mask;
  1998. uvsrc_y = src_y;
  1999. uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
  2000. } else {
  2001. //Chroma444
  2002. uvsx = motion_x & s_mask;
  2003. uvsy = motion_y & s_mask;
  2004. uvsrc_x = src_x;
  2005. uvsrc_y = src_y;
  2006. }
  2007. }
  2008. }
  2009. ptr_y = ref_picture[0] + src_y * linesize + src_x;
  2010. ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
  2011. ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
  2012. if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
  2013. (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  2014. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
  2015. linesize >> field_based, 17, 17 + field_based,
  2016. src_x, src_y << field_based, h_edge_pos,
  2017. v_edge_pos);
  2018. ptr_y = s->edge_emu_buffer;
  2019. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  2020. uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
  2021. s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
  2022. 9 + field_based,
  2023. uvsrc_x, uvsrc_y << field_based,
  2024. h_edge_pos >> 1, v_edge_pos >> 1);
  2025. s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
  2026. 9 + field_based,
  2027. uvsrc_x, uvsrc_y << field_based,
  2028. h_edge_pos >> 1, v_edge_pos >> 1);
  2029. ptr_cb = uvbuf;
  2030. ptr_cr = uvbuf + 16;
  2031. }
  2032. }
  2033. // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
  2034. if (bottom_field) {
  2035. dest_y += s->linesize;
  2036. dest_cb += s->uvlinesize;
  2037. dest_cr += s->uvlinesize;
  2038. }
  2039. if (field_select) {
  2040. ptr_y += s->linesize;
  2041. ptr_cb += s->uvlinesize;
  2042. ptr_cr += s->uvlinesize;
  2043. }
  2044. sx = (sx << 2) >> lowres;
  2045. sy = (sy << 2) >> lowres;
  2046. pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
  2047. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  2048. int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
  2049. uvsx = (uvsx << 2) >> lowres;
  2050. uvsy = (uvsy << 2) >> lowres;
  2051. if (hc) {
  2052. pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
  2053. pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
  2054. }
  2055. }
  2056. // FIXME h261 lowres loop filter
  2057. }
  2058. static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
  2059. uint8_t *dest_cb, uint8_t *dest_cr,
  2060. uint8_t **ref_picture,
  2061. h264_chroma_mc_func * pix_op,
  2062. int mx, int my)
  2063. {
  2064. const int lowres = s->avctx->lowres;
  2065. const int op_index = FFMIN(lowres, 3);
  2066. const int block_s = 8 >> lowres;
  2067. const int s_mask = (2 << lowres) - 1;
2068. const int h_edge_pos = s->h_edge_pos >> (lowres + 1);
2069. const int v_edge_pos = s->v_edge_pos >> (lowres + 1);
  2070. int emu = 0, src_x, src_y, offset, sx, sy;
  2071. uint8_t *ptr;
  2072. if (s->quarter_sample) {
  2073. mx /= 2;
  2074. my /= 2;
  2075. }
  2076. /* In case of 8X8, we construct a single chroma motion vector
  2077. with a special rounding */
  2078. mx = ff_h263_round_chroma(mx);
  2079. my = ff_h263_round_chroma(my);
  2080. sx = mx & s_mask;
  2081. sy = my & s_mask;
2082. src_x = s->mb_x * block_s + (mx >> (lowres + 1));
2083. src_y = s->mb_y * block_s + (my >> (lowres + 1));
  2084. offset = src_y * s->uvlinesize + src_x;
  2085. ptr = ref_picture[1] + offset;
  2086. if (s->flags & CODEC_FLAG_EMU_EDGE) {
  2087. if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
  2088. (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
  2089. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
  2090. 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
  2091. ptr = s->edge_emu_buffer;
  2092. emu = 1;
  2093. }
  2094. }
  2095. sx = (sx << 2) >> lowres;
  2096. sy = (sy << 2) >> lowres;
  2097. pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
  2098. ptr = ref_picture[2] + offset;
  2099. if (emu) {
  2100. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
  2101. src_x, src_y, h_edge_pos, v_edge_pos);
  2102. ptr = s->edge_emu_buffer;
  2103. }
  2104. pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
  2105. }
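/* Note on the rounding above: the caller sums the four luma MVs, so
 * mx/my arrive at 4x scale; ff_h263_round_chroma() then maps that sum
 * to a chroma vector of roughly sum/8 using a bias table, which is the
 * "special rounding" H.263 prescribes for 8x8 mode. */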
  2106. /**
  2107. * motion compensation of a single macroblock
  2108. * @param s context
  2109. * @param dest_y luma destination pointer
  2110. * @param dest_cb chroma cb/u destination pointer
  2111. * @param dest_cr chroma cr/v destination pointer
  2112. * @param dir direction (0->forward, 1->backward)
  2113. * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
  2114. * @param pix_op halfpel motion compensation function (average or put normally)
  2115. * the motion vectors are taken from s->mv and the MV type from s->mv_type
  2116. */
  2117. static inline void MPV_motion_lowres(MpegEncContext *s,
  2118. uint8_t *dest_y, uint8_t *dest_cb,
  2119. uint8_t *dest_cr,
  2120. int dir, uint8_t **ref_picture,
  2121. h264_chroma_mc_func *pix_op)
  2122. {
  2123. int mx, my;
  2124. int mb_x, mb_y, i;
  2125. const int lowres = s->avctx->lowres;
  2126. const int block_s = 8 >>lowres;
  2127. mb_x = s->mb_x;
  2128. mb_y = s->mb_y;
  2129. switch (s->mv_type) {
  2130. case MV_TYPE_16X16:
  2131. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2132. 0, 0, 0,
  2133. ref_picture, pix_op,
  2134. s->mv[dir][0][0], s->mv[dir][0][1],
  2135. 2 * block_s, mb_y);
  2136. break;
  2137. case MV_TYPE_8X8:
  2138. mx = 0;
  2139. my = 0;
  2140. for (i = 0; i < 4; i++) {
  2141. hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
  2142. s->linesize) * block_s,
  2143. ref_picture[0], 0, 0,
  2144. (2 * mb_x + (i & 1)) * block_s,
  2145. (2 * mb_y + (i >> 1)) * block_s,
  2146. s->width, s->height, s->linesize,
  2147. s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
  2148. block_s, block_s, pix_op,
  2149. s->mv[dir][i][0], s->mv[dir][i][1]);
  2150. mx += s->mv[dir][i][0];
  2151. my += s->mv[dir][i][1];
  2152. }
  2153. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
  2154. chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
  2155. pix_op, mx, my);
  2156. break;
  2157. case MV_TYPE_FIELD:
  2158. if (s->picture_structure == PICT_FRAME) {
  2159. /* top field */
  2160. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2161. 1, 0, s->field_select[dir][0],
  2162. ref_picture, pix_op,
  2163. s->mv[dir][0][0], s->mv[dir][0][1],
  2164. block_s, mb_y);
  2165. /* bottom field */
  2166. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2167. 1, 1, s->field_select[dir][1],
  2168. ref_picture, pix_op,
  2169. s->mv[dir][1][0], s->mv[dir][1][1],
  2170. block_s, mb_y);
  2171. } else {
  2172. if (s->picture_structure != s->field_select[dir][0] + 1 &&
  2173. s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
  2174. ref_picture = s->current_picture_ptr->f.data;
  2175. }
  2176. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2177. 0, 0, s->field_select[dir][0],
  2178. ref_picture, pix_op,
  2179. s->mv[dir][0][0],
  2180. s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
  2181. }
  2182. break;
  2183. case MV_TYPE_16X8:
  2184. for (i = 0; i < 2; i++) {
  2185. uint8_t **ref2picture;
  2186. if (s->picture_structure == s->field_select[dir][i] + 1 ||
  2187. s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
  2188. ref2picture = ref_picture;
  2189. } else {
  2190. ref2picture = s->current_picture_ptr->f.data;
  2191. }
  2192. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2193. 0, 0, s->field_select[dir][i],
  2194. ref2picture, pix_op,
  2195. s->mv[dir][i][0], s->mv[dir][i][1] +
  2196. 2 * block_s * i, block_s, mb_y >> 1);
  2197. dest_y += 2 * block_s * s->linesize;
  2198. dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  2199. dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  2200. }
  2201. break;
  2202. case MV_TYPE_DMV:
  2203. if (s->picture_structure == PICT_FRAME) {
  2204. for (i = 0; i < 2; i++) {
  2205. int j;
  2206. for (j = 0; j < 2; j++) {
  2207. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2208. 1, j, j ^ i,
  2209. ref_picture, pix_op,
  2210. s->mv[dir][2 * i + j][0],
  2211. s->mv[dir][2 * i + j][1],
  2212. block_s, mb_y);
  2213. }
  2214. pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
  2215. }
  2216. } else {
  2217. for (i = 0; i < 2; i++) {
  2218. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2219. 0, 0, s->picture_structure != i + 1,
  2220. ref_picture, pix_op,
  2221. s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
  2222. 2 * block_s, mb_y >> 1);
  2223. // after put we make avg of the same block
  2224. pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
  2225. // opposite parity is always in the same
  2226. // frame if this is second field
  2227. if (!s->first_field) {
  2228. ref_picture = s->current_picture_ptr->f.data;
  2229. }
  2230. }
  2231. }
  2232. break;
  2233. default:
  2234. av_assert2(0);
  2235. }
  2236. }
  2237. /**
  2238. * find the lowest MB row referenced in the MVs
  2239. */
  2240. int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
  2241. {
  2242. int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
  2243. int my, off, i, mvs;
  2244. if (s->picture_structure != PICT_FRAME || s->mcsel)
  2245. goto unhandled;
  2246. switch (s->mv_type) {
  2247. case MV_TYPE_16X16:
  2248. mvs = 1;
  2249. break;
  2250. case MV_TYPE_16X8:
  2251. mvs = 2;
  2252. break;
  2253. case MV_TYPE_8X8:
  2254. mvs = 4;
  2255. break;
  2256. default:
  2257. goto unhandled;
  2258. }
  2259. for (i = 0; i < mvs; i++) {
  2260. my = s->mv[dir][i][1]<<qpel_shift;
  2261. my_max = FFMAX(my_max, my);
  2262. my_min = FFMIN(my_min, my);
  2263. }
  2264. off = (FFMAX(-my_min, my_max) + 63) >> 6;
  2265. return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
  2266. unhandled:
  2267. return s->mb_height-1;
  2268. }
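/* Worked example: with quarter_sample = 0 a half-pel MV of +20 becomes
 * my = 20 << 1 = 40 quarter-pel units, so off = (40 + 63) >> 6 = 1;
 * since 64 quarter-pel units equal 16 luma pixels (one MB row), the MC
 * for this MB references at most one row below the current one. */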
  2269. /* put block[] to dest[] */
  2270. static inline void put_dct(MpegEncContext *s,
  2271. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  2272. {
  2273. s->dct_unquantize_intra(s, block, i, qscale);
  2274. s->dsp.idct_put (dest, line_size, block);
  2275. }
  2276. /* add block[] to dest[] */
  2277. static inline void add_dct(MpegEncContext *s,
  2278. int16_t *block, int i, uint8_t *dest, int line_size)
  2279. {
  2280. if (s->block_last_index[i] >= 0) {
  2281. s->dsp.idct_add (dest, line_size, block);
  2282. }
  2283. }
  2284. static inline void add_dequant_dct(MpegEncContext *s,
  2285. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  2286. {
  2287. if (s->block_last_index[i] >= 0) {
  2288. s->dct_unquantize_inter(s, block, i, qscale);
  2289. s->dsp.idct_add (dest, line_size, block);
  2290. }
  2291. }
  2292. /**
  2293. * Clean dc, ac, coded_block for the current non-intra MB.
  2294. */
  2295. void ff_clean_intra_table_entries(MpegEncContext *s)
  2296. {
  2297. int wrap = s->b8_stride;
  2298. int xy = s->block_index[0];
  2299. s->dc_val[0][xy ] =
  2300. s->dc_val[0][xy + 1 ] =
  2301. s->dc_val[0][xy + wrap] =
  2302. s->dc_val[0][xy + 1 + wrap] = 1024;
  2303. /* ac pred */
  2304. memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
  2305. memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
  2306. if (s->msmpeg4_version>=3) {
  2307. s->coded_block[xy ] =
  2308. s->coded_block[xy + 1 ] =
  2309. s->coded_block[xy + wrap] =
  2310. s->coded_block[xy + 1 + wrap] = 0;
  2311. }
  2312. /* chroma */
  2313. wrap = s->mb_stride;
  2314. xy = s->mb_x + s->mb_y * wrap;
  2315. s->dc_val[1][xy] =
  2316. s->dc_val[2][xy] = 1024;
  2317. /* ac pred */
  2318. memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
  2319. memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
  2320. s->mbintra_table[xy]= 0;
  2321. }
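/* Note: 1024 is the standard DC-prediction reset value (mid-grey 128
 * times the base DC scale of 8), so the next intra block predicts from
 * a neutral DC instead of stale neighbour data. */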
  2322. /* generic function called after a macroblock has been parsed by the
  2323. decoder or after it has been encoded by the encoder.
  2324. Important variables used:
  2325. s->mb_intra : true if intra macroblock
  2326. s->mv_dir : motion vector direction
  2327. s->mv_type : motion vector type
  2328. s->mv : motion vector
  2329. s->interlaced_dct : true if interlaced dct used (mpeg2)
  2330. */
  2331. static av_always_inline
  2332. void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
  2333. int lowres_flag, int is_mpeg12)
  2334. {
  2335. const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
  2336. if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
  2337. ff_xvmc_decode_mb(s);//xvmc uses pblocks
  2338. return;
  2339. }
  2340. if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  2341. /* print DCT coefficients */
  2342. int i,j;
  2343. av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
  2344. for(i=0; i<6; i++){
  2345. for(j=0; j<64; j++){
  2346. av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
  2347. }
  2348. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  2349. }
  2350. }
  2351. s->current_picture.qscale_table[mb_xy] = s->qscale;
  2352. /* update DC predictors for P macroblocks */
  2353. if (!s->mb_intra) {
  2354. if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
  2355. if(s->mbintra_table[mb_xy])
  2356. ff_clean_intra_table_entries(s);
  2357. } else {
  2358. s->last_dc[0] =
  2359. s->last_dc[1] =
  2360. s->last_dc[2] = 128 << s->intra_dc_precision;
  2361. }
  2362. }
  2363. else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
  2364. s->mbintra_table[mb_xy]=1;
  2365. if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
  2366. uint8_t *dest_y, *dest_cb, *dest_cr;
  2367. int dct_linesize, dct_offset;
  2368. op_pixels_func (*op_pix)[4];
  2369. qpel_mc_func (*op_qpix)[16];
  2370. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  2371. const int uvlinesize = s->current_picture.f.linesize[1];
  2372. const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
  2373. const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
  2374. /* avoid copy if macroblock skipped in last frame too */
2375. /* do this only while decoding, as the buffers may get slightly trashed during encoding */
  2376. if(!s->encoding){
  2377. uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
  2378. if (s->mb_skipped) {
  2379. s->mb_skipped= 0;
  2380. av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
  2381. *mbskip_ptr = 1;
  2382. } else if(!s->current_picture.reference) {
  2383. *mbskip_ptr = 1;
  2384. } else{
  2385. *mbskip_ptr = 0; /* not skipped */
  2386. }
  2387. }
  2388. dct_linesize = linesize << s->interlaced_dct;
  2389. dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
  2390. if(readable){
  2391. dest_y= s->dest[0];
  2392. dest_cb= s->dest[1];
  2393. dest_cr= s->dest[2];
  2394. }else{
  2395. dest_y = s->b_scratchpad;
  2396. dest_cb= s->b_scratchpad+16*linesize;
  2397. dest_cr= s->b_scratchpad+32*linesize;
  2398. }
  2399. if (!s->mb_intra) {
  2400. /* motion handling */
  2401. /* decoding or more than one mb_type (MC was already done otherwise) */
  2402. if(!s->encoding){
  2403. if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
  2404. if (s->mv_dir & MV_DIR_FORWARD) {
  2405. ff_thread_await_progress(&s->last_picture_ptr->tf,
  2406. ff_MPV_lowest_referenced_row(s, 0),
  2407. 0);
  2408. }
  2409. if (s->mv_dir & MV_DIR_BACKWARD) {
  2410. ff_thread_await_progress(&s->next_picture_ptr->tf,
  2411. ff_MPV_lowest_referenced_row(s, 1),
  2412. 0);
  2413. }
  2414. }
  2415. if(lowres_flag){
  2416. h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
  2417. if (s->mv_dir & MV_DIR_FORWARD) {
  2418. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
  2419. op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
  2420. }
  2421. if (s->mv_dir & MV_DIR_BACKWARD) {
  2422. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
  2423. }
  2424. }else{
  2425. op_qpix= s->me.qpel_put;
  2426. if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
  2427. op_pix = s->hdsp.put_pixels_tab;
  2428. }else{
  2429. op_pix = s->hdsp.put_no_rnd_pixels_tab;
  2430. }
  2431. if (s->mv_dir & MV_DIR_FORWARD) {
  2432. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
  2433. op_pix = s->hdsp.avg_pixels_tab;
  2434. op_qpix= s->me.qpel_avg;
  2435. }
  2436. if (s->mv_dir & MV_DIR_BACKWARD) {
  2437. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
  2438. }
  2439. }
  2440. }
  2441. /* skip dequant / idct if we are really late ;) */
  2442. if(s->avctx->skip_idct){
  2443. if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
  2444. ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
  2445. || s->avctx->skip_idct >= AVDISCARD_ALL)
  2446. goto skip_idct;
  2447. }
  2448. /* add dct residue */
  2449. if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
  2450. || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
  2451. add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2452. add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2453. add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2454. add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2455. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2456. if (s->chroma_y_shift){
  2457. add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2458. add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2459. }else{
  2460. dct_linesize >>= 1;
  2461. dct_offset >>=1;
  2462. add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2463. add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2464. add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2465. add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2466. }
  2467. }
  2468. } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
  2469. add_dct(s, block[0], 0, dest_y , dct_linesize);
  2470. add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
  2471. add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
  2472. add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
  2473. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2474. if(s->chroma_y_shift){//Chroma420
  2475. add_dct(s, block[4], 4, dest_cb, uvlinesize);
  2476. add_dct(s, block[5], 5, dest_cr, uvlinesize);
  2477. }else{
  2478. //chroma422
  2479. dct_linesize = uvlinesize << s->interlaced_dct;
  2480. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
  2481. add_dct(s, block[4], 4, dest_cb, dct_linesize);
  2482. add_dct(s, block[5], 5, dest_cr, dct_linesize);
  2483. add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
  2484. add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
  2485. if(!s->chroma_x_shift){//Chroma444
  2486. add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
  2487. add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
  2488. add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
  2489. add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
  2490. }
  2491. }
  2492. }//fi gray
  2493. }
  2494. else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
  2495. ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
  2496. }
  2497. } else {
  2498. /* dct only in intra block */
  2499. if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
  2500. put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2501. put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2502. put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2503. put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2504. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2505. if(s->chroma_y_shift){
  2506. put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2507. put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2508. }else{
  2509. dct_offset >>=1;
  2510. dct_linesize >>=1;
  2511. put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2512. put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2513. put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2514. put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2515. }
  2516. }
  2517. }else{
  2518. s->dsp.idct_put(dest_y , dct_linesize, block[0]);
  2519. s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
  2520. s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
  2521. s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
  2522. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2523. if(s->chroma_y_shift){
  2524. s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
  2525. s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
  2526. }else{
  2527. dct_linesize = uvlinesize << s->interlaced_dct;
  2528. dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
  2529. s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
  2530. s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
  2531. s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
  2532. s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
  2533. if(!s->chroma_x_shift){//Chroma444
  2534. s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
  2535. s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
  2536. s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
  2537. s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
  2538. }
  2539. }
  2540. }//gray
  2541. }
  2542. }
  2543. skip_idct:
  2544. if(!readable){
  2545. s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
  2546. s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
  2547. s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
  2548. }
  2549. }
  2550. }
  2551. void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
  2552. #if !CONFIG_SMALL
  2553. if(s->out_format == FMT_MPEG1) {
  2554. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
  2555. else MPV_decode_mb_internal(s, block, 0, 1);
  2556. } else
  2557. #endif
  2558. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
  2559. else MPV_decode_mb_internal(s, block, 0, 0);
  2560. }
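/* With !CONFIG_SMALL, the constant is_mpeg12/lowres_flag arguments let
 * the compiler fold the av_always_inline body above into specialised
 * variants, dropping the per-MB branches at the cost of code size. */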
  2561. /**
2562. * @param h the normal height; it is reduced automatically if needed for the last row
  2563. */
  2564. void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
  2565. Picture *last, int y, int h, int picture_structure,
  2566. int first_field, int draw_edges, int low_delay,
  2567. int v_edge_pos, int h_edge_pos)
  2568. {
  2569. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
  2570. int hshift = desc->log2_chroma_w;
  2571. int vshift = desc->log2_chroma_h;
  2572. const int field_pic = picture_structure != PICT_FRAME;
  2573. if(field_pic){
  2574. h <<= 1;
  2575. y <<= 1;
  2576. }
  2577. if (!avctx->hwaccel &&
  2578. !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
  2579. draw_edges &&
  2580. cur->reference &&
  2581. !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
  2582. int *linesize = cur->f.linesize;
  2583. int sides = 0, edge_h;
  2584. if (y==0) sides |= EDGE_TOP;
  2585. if (y + h >= v_edge_pos)
  2586. sides |= EDGE_BOTTOM;
  2587. edge_h= FFMIN(h, v_edge_pos - y);
  2588. dsp->draw_edges(cur->f.data[0] + y * linesize[0],
  2589. linesize[0], h_edge_pos, edge_h,
  2590. EDGE_WIDTH, EDGE_WIDTH, sides);
  2591. dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
  2592. linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
  2593. EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
  2594. dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
  2595. linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
  2596. EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
  2597. }
  2598. h = FFMIN(h, avctx->height - y);
  2599. if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
  2600. if (avctx->draw_horiz_band) {
  2601. AVFrame *src;
  2602. int offset[AV_NUM_DATA_POINTERS];
  2603. int i;
  2604. if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
  2605. (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
  2606. src = &cur->f;
  2607. else if (last)
  2608. src = &last->f;
  2609. else
  2610. return;
  2611. if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
  2612. picture_structure == PICT_FRAME &&
  2613. avctx->codec_id != AV_CODEC_ID_SVQ3) {
  2614. for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
  2615. offset[i] = 0;
  2616. }else{
  2617. offset[0]= y * src->linesize[0];
  2618. offset[1]=
  2619. offset[2]= (y >> vshift) * src->linesize[1];
  2620. for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
  2621. offset[i] = 0;
  2622. }
  2623. emms_c();
  2624. avctx->draw_horiz_band(avctx, src, offset,
  2625. y, picture_structure, h);
  2626. }
  2627. }
  2628. void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
  2629. {
  2630. int draw_edges = s->unrestricted_mv && !s->intra_only;
  2631. ff_draw_horiz_band(s->avctx, &s->dsp, s->current_picture_ptr,
  2632. s->last_picture_ptr, y, h, s->picture_structure,
  2633. s->first_field, draw_edges, s->low_delay,
  2634. s->v_edge_pos, s->h_edge_pos);
  2635. }
  2636. void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
  2637. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  2638. const int uvlinesize = s->current_picture.f.linesize[1];
  2639. const int mb_size= 4 - s->avctx->lowres;
  2640. s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
  2641. s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
  2642. s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
  2643. s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
  2644. s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2645. s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2646. //block_index is not used by mpeg2, so it is not affected by chroma_format
  2647. s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
  2648. s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2649. s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2650. if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
  2651. {
  2652. if(s->picture_structure==PICT_FRAME){
  2653. s->dest[0] += s->mb_y * linesize << mb_size;
  2654. s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2655. s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2656. }else{
  2657. s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
  2658. s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2659. s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2660. av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
  2661. }
  2662. }
  2663. }
  2664. /**
  2665. * Permute an 8x8 block.
  2666. * @param block the block which will be permuted according to the given permutation vector
  2667. * @param permutation the permutation vector
2668. * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
2669. * @param scantable the scantable in use; it is only used to speed the permutation up, the block is not
2670. * (inverse) permuted to scantable order!
  2671. */
  2672. void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
  2673. {
  2674. int i;
  2675. int16_t temp[64];
  2676. if(last<=0) return;
  2677. //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
  2678. for(i=0; i<=last; i++){
  2679. const int j= scantable[i];
  2680. temp[j]= block[j];
  2681. block[j]=0;
  2682. }
  2683. for(i=0; i<=last; i++){
  2684. const int j= scantable[i];
  2685. const int perm_j= permutation[j];
  2686. block[perm_j]= temp[j];
  2687. }
  2688. }
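/* A minimal usage sketch (the surrounding values are illustrative):
 * undo the IDCT permutation on a block stored in permuted order. */
#if 0
    ff_block_permute(block, s->dsp.idct_permutation,
                     s->intra_scantable.scantable, s->block_last_index[0]);
#endif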
  2689. void ff_mpeg_flush(AVCodecContext *avctx){
  2690. int i;
  2691. MpegEncContext *s = avctx->priv_data;
  2692. if(s==NULL || s->picture==NULL)
  2693. return;
  2694. for (i = 0; i < MAX_PICTURE_COUNT; i++)
  2695. ff_mpeg_unref_picture(s, &s->picture[i]);
  2696. s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
  2697. ff_mpeg_unref_picture(s, &s->current_picture);
  2698. ff_mpeg_unref_picture(s, &s->last_picture);
  2699. ff_mpeg_unref_picture(s, &s->next_picture);
  2700. s->mb_x= s->mb_y= 0;
  2701. s->closed_gop= 0;
  2702. s->parse_context.state= -1;
  2703. s->parse_context.frame_start_found= 0;
  2704. s->parse_context.overread= 0;
  2705. s->parse_context.overread_index= 0;
  2706. s->parse_context.index= 0;
  2707. s->parse_context.last_index= 0;
  2708. s->bitstream_buffer_size=0;
  2709. s->pp_time=0;
  2710. }
  2711. static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
  2712. int16_t *block, int n, int qscale)
  2713. {
  2714. int i, level, nCoeffs;
  2715. const uint16_t *quant_matrix;
  2716. nCoeffs= s->block_last_index[n];
  2717. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  2718. /* XXX: only mpeg1 */
  2719. quant_matrix = s->intra_matrix;
  2720. for(i=1;i<=nCoeffs;i++) {
  2721. int j= s->intra_scantable.permutated[i];
  2722. level = block[j];
  2723. if (level) {
  2724. if (level < 0) {
  2725. level = -level;
  2726. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2727. level = (level - 1) | 1;
  2728. level = -level;
  2729. } else {
  2730. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2731. level = (level - 1) | 1;
  2732. }
  2733. block[j] = level;
  2734. }
  2735. }
  2736. }
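/* Worked example of the unquantisation above: level = 3, qscale = 8 and
 * quant_matrix[j] = 16 give (3 * 8 * 16) >> 3 = 48, and the
 * "oddification" (48 - 1) | 1 = 47 keeps reconstructed levels odd, as
 * MPEG-1 requires for IDCT mismatch control. */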
  2737. static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
  2738. int16_t *block, int n, int qscale)
  2739. {
  2740. int i, level, nCoeffs;
  2741. const uint16_t *quant_matrix;
  2742. nCoeffs= s->block_last_index[n];
  2743. quant_matrix = s->inter_matrix;
  2744. for(i=0; i<=nCoeffs; i++) {
  2745. int j= s->intra_scantable.permutated[i];
  2746. level = block[j];
  2747. if (level) {
  2748. if (level < 0) {
  2749. level = -level;
  2750. level = (((level << 1) + 1) * qscale *
  2751. ((int) (quant_matrix[j]))) >> 4;
  2752. level = (level - 1) | 1;
  2753. level = -level;
  2754. } else {
  2755. level = (((level << 1) + 1) * qscale *
  2756. ((int) (quant_matrix[j]))) >> 4;
  2757. level = (level - 1) | 1;
  2758. }
  2759. block[j] = level;
  2760. }
  2761. }
  2762. }
  2763. static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
  2764. int16_t *block, int n, int qscale)
  2765. {
  2766. int i, level, nCoeffs;
  2767. const uint16_t *quant_matrix;
  2768. if(s->alternate_scan) nCoeffs= 63;
  2769. else nCoeffs= s->block_last_index[n];
  2770. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  2771. quant_matrix = s->intra_matrix;
  2772. for(i=1;i<=nCoeffs;i++) {
  2773. int j= s->intra_scantable.permutated[i];
  2774. level = block[j];
  2775. if (level) {
  2776. if (level < 0) {
  2777. level = -level;
  2778. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2779. level = -level;
  2780. } else {
  2781. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2782. }
  2783. block[j] = level;
  2784. }
  2785. }
  2786. }
  2787. static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
  2788. int16_t *block, int n, int qscale)
  2789. {
  2790. int i, level, nCoeffs;
  2791. const uint16_t *quant_matrix;
  2792. int sum=-1;
  2793. if(s->alternate_scan) nCoeffs= 63;
  2794. else nCoeffs= s->block_last_index[n];
  2795. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  2796. sum += block[0];
  2797. quant_matrix = s->intra_matrix;
  2798. for(i=1;i<=nCoeffs;i++) {
  2799. int j= s->intra_scantable.permutated[i];
  2800. level = block[j];
  2801. if (level) {
  2802. if (level < 0) {
  2803. level = -level;
  2804. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2805. level = -level;
  2806. } else {
  2807. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  2808. }
  2809. block[j] = level;
  2810. sum+=level;
  2811. }
  2812. }
  2813. block[63]^=sum&1;
  2814. }
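/* MPEG-2 replaces MPEG-1's per-coefficient oddification with a single
 * parity fix: sum starts at -1, so block[63] ^= sum & 1 toggles the
 * last coefficient's LSB exactly when the coefficient sum is even, as
 * the spec's mismatch control requires. */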
  2815. static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
  2816. int16_t *block, int n, int qscale)
  2817. {
  2818. int i, level, nCoeffs;
  2819. const uint16_t *quant_matrix;
  2820. int sum=-1;
  2821. if(s->alternate_scan) nCoeffs= 63;
  2822. else nCoeffs= s->block_last_index[n];
  2823. quant_matrix = s->inter_matrix;
  2824. for(i=0; i<=nCoeffs; i++) {
  2825. int j= s->intra_scantable.permutated[i];
  2826. level = block[j];
  2827. if (level) {
  2828. if (level < 0) {
  2829. level = -level;
  2830. level = (((level << 1) + 1) * qscale *
  2831. ((int) (quant_matrix[j]))) >> 4;
  2832. level = -level;
  2833. } else {
  2834. level = (((level << 1) + 1) * qscale *
  2835. ((int) (quant_matrix[j]))) >> 4;
  2836. }
  2837. block[j] = level;
  2838. sum+=level;
  2839. }
  2840. }
  2841. block[63]^=sum&1;
  2842. }
  2843. static void dct_unquantize_h263_intra_c(MpegEncContext *s,
  2844. int16_t *block, int n, int qscale)
  2845. {
  2846. int i, level, qmul, qadd;
  2847. int nCoeffs;
  2848. av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
  2849. qmul = qscale << 1;
  2850. if (!s->h263_aic) {
  2851. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  2852. qadd = (qscale - 1) | 1;
  2853. }else{
  2854. qadd = 0;
  2855. }
  2856. if(s->ac_pred)
  2857. nCoeffs=63;
  2858. else
  2859. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  2860. for(i=1; i<=nCoeffs; i++) {
  2861. level = block[i];
  2862. if (level) {
  2863. if (level < 0) {
  2864. level = level * qmul - qadd;
  2865. } else {
  2866. level = level * qmul + qadd;
  2867. }
  2868. block[i] = level;
  2869. }
  2870. }
  2871. }
  2872. static void dct_unquantize_h263_inter_c(MpegEncContext *s,
  2873. int16_t *block, int n, int qscale)
  2874. {
  2875. int i, level, qmul, qadd;
  2876. int nCoeffs;
  2877. av_assert2(s->block_last_index[n]>=0);
  2878. qadd = (qscale - 1) | 1;
  2879. qmul = qscale << 1;
  2880. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  2881. for(i=0; i<=nCoeffs; i++) {
  2882. level = block[i];
  2883. if (level) {
  2884. if (level < 0) {
  2885. level = level * qmul - qadd;
  2886. } else {
  2887. level = level * qmul + qadd;
  2888. }
  2889. block[i] = level;
  2890. }
  2891. }
  2892. }
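/* Worked example: qscale = 6 gives qmul = 12 and qadd = (6 - 1) | 1 = 5,
 * so a coded level of 2 reconstructs as 2 * 12 + 5 = 29 and -2 as -29;
 * this matches the H.263 rule |rec| = QUANT * (2 * |level| + 1), minus 1
 * when QUANT is even. */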
  2893. /**
2894. * Set qscale and update the qscale-dependent variables.
  2895. */
  2896. void ff_set_qscale(MpegEncContext * s, int qscale)
  2897. {
  2898. if (qscale < 1)
  2899. qscale = 1;
  2900. else if (qscale > 31)
  2901. qscale = 31;
  2902. s->qscale = qscale;
  2903. s->chroma_qscale= s->chroma_qscale_table[qscale];
  2904. s->y_dc_scale= s->y_dc_scale_table[ qscale ];
  2905. s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
  2906. }
  2907. void ff_MPV_report_decode_progress(MpegEncContext *s)
  2908. {
  2909. if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
  2910. ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
  2911. }
  2912. #if CONFIG_ERROR_RESILIENCE
  2913. void ff_mpeg_er_frame_start(MpegEncContext *s)
  2914. {
  2915. ERContext *er = &s->er;
  2916. er->cur_pic = s->current_picture_ptr;
  2917. er->last_pic = s->last_picture_ptr;
  2918. er->next_pic = s->next_picture_ptr;
  2919. er->pp_time = s->pp_time;
  2920. er->pb_time = s->pb_time;
  2921. er->quarter_sample = s->quarter_sample;
  2922. er->partitioned_frame = s->partitioned_frame;
  2923. ff_er_frame_start(er);
  2924. }
  2925. #endif /* CONFIG_ERROR_RESILIENCE */