You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2468 lines
83KB

  1. /*
  2. * The simplest mpeg encoder (well, it was the simplest!)
  3. * Copyright (c) 2000,2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of Libav.
  9. *
  10. * Libav is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * Libav is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with Libav; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * The simplest mpeg encoder (well, it was the simplest!).
  27. */
  28. #include "libavutil/attributes.h"
  29. #include "libavutil/avassert.h"
  30. #include "libavutil/imgutils.h"
  31. #include "libavutil/internal.h"
  32. #include "libavutil/timer.h"
  33. #include "avcodec.h"
  34. #include "dsputil.h"
  35. #include "internal.h"
  36. #include "mathops.h"
  37. #include "mpegutils.h"
  38. #include "mpegvideo.h"
  39. #include "mjpegenc.h"
  40. #include "msmpeg4.h"
  41. #include "xvmc_internal.h"
  42. #include "thread.h"
  43. #include <limits.h>
/* Default luma-to-chroma qscale mapping: the identity, i.e. chroma uses
 * the same quantizer scale as luma unless a codec installs its own table. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
/* MPEG-1 DC coefficient scale, indexed by qscale: constant 8 regardless
 * of the quantizer (MPEG-1 has no variable DC precision). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
/* MPEG-2 DC scale table: constant 4.  One of the four tables selected via
 * ff_mpeg2_dc_scale_table[] — presumably indexed by intra_dc_precision;
 * verify against call sites. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
/* MPEG-2 DC scale table: constant 2 (see ff_mpeg2_dc_scale_table[]). */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
/* MPEG-2 DC scale table: constant 1 (see ff_mpeg2_dc_scale_table[]). */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
/* The four MPEG-2 DC scale tables (scales 8, 4, 2, 1).  The index is
 * presumably the bitstream's intra_dc_precision (0..3) — confirm at the
 * call sites, which are outside this file/chunk. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
/* AV_PIX_FMT_NONE-terminated pixel format list: YUV 4:2:0 only. */
const enum AVPixelFormat ff_pixfmt_list_420[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
  103. static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
  104. int16_t *block, int n, int qscale)
  105. {
  106. int i, level, nCoeffs;
  107. const uint16_t *quant_matrix;
  108. nCoeffs= s->block_last_index[n];
  109. if (n < 4)
  110. block[0] = block[0] * s->y_dc_scale;
  111. else
  112. block[0] = block[0] * s->c_dc_scale;
  113. /* XXX: only mpeg1 */
  114. quant_matrix = s->intra_matrix;
  115. for(i=1;i<=nCoeffs;i++) {
  116. int j= s->intra_scantable.permutated[i];
  117. level = block[j];
  118. if (level) {
  119. if (level < 0) {
  120. level = -level;
  121. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  122. level = (level - 1) | 1;
  123. level = -level;
  124. } else {
  125. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  126. level = (level - 1) | 1;
  127. }
  128. block[j] = level;
  129. }
  130. }
  131. }
  132. static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
  133. int16_t *block, int n, int qscale)
  134. {
  135. int i, level, nCoeffs;
  136. const uint16_t *quant_matrix;
  137. nCoeffs= s->block_last_index[n];
  138. quant_matrix = s->inter_matrix;
  139. for(i=0; i<=nCoeffs; i++) {
  140. int j= s->intra_scantable.permutated[i];
  141. level = block[j];
  142. if (level) {
  143. if (level < 0) {
  144. level = -level;
  145. level = (((level << 1) + 1) * qscale *
  146. ((int) (quant_matrix[j]))) >> 4;
  147. level = (level - 1) | 1;
  148. level = -level;
  149. } else {
  150. level = (((level << 1) + 1) * qscale *
  151. ((int) (quant_matrix[j]))) >> 4;
  152. level = (level - 1) | 1;
  153. }
  154. block[j] = level;
  155. }
  156. }
  157. }
  158. static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
  159. int16_t *block, int n, int qscale)
  160. {
  161. int i, level, nCoeffs;
  162. const uint16_t *quant_matrix;
  163. if(s->alternate_scan) nCoeffs= 63;
  164. else nCoeffs= s->block_last_index[n];
  165. if (n < 4)
  166. block[0] = block[0] * s->y_dc_scale;
  167. else
  168. block[0] = block[0] * s->c_dc_scale;
  169. quant_matrix = s->intra_matrix;
  170. for(i=1;i<=nCoeffs;i++) {
  171. int j= s->intra_scantable.permutated[i];
  172. level = block[j];
  173. if (level) {
  174. if (level < 0) {
  175. level = -level;
  176. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  177. level = -level;
  178. } else {
  179. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  180. }
  181. block[j] = level;
  182. }
  183. }
  184. }
  185. static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
  186. int16_t *block, int n, int qscale)
  187. {
  188. int i, level, nCoeffs;
  189. const uint16_t *quant_matrix;
  190. int sum=-1;
  191. if(s->alternate_scan) nCoeffs= 63;
  192. else nCoeffs= s->block_last_index[n];
  193. if (n < 4)
  194. block[0] = block[0] * s->y_dc_scale;
  195. else
  196. block[0] = block[0] * s->c_dc_scale;
  197. quant_matrix = s->intra_matrix;
  198. for(i=1;i<=nCoeffs;i++) {
  199. int j= s->intra_scantable.permutated[i];
  200. level = block[j];
  201. if (level) {
  202. if (level < 0) {
  203. level = -level;
  204. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  205. level = -level;
  206. } else {
  207. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  208. }
  209. block[j] = level;
  210. sum+=level;
  211. }
  212. }
  213. block[63]^=sum&1;
  214. }
  215. static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
  216. int16_t *block, int n, int qscale)
  217. {
  218. int i, level, nCoeffs;
  219. const uint16_t *quant_matrix;
  220. int sum=-1;
  221. if(s->alternate_scan) nCoeffs= 63;
  222. else nCoeffs= s->block_last_index[n];
  223. quant_matrix = s->inter_matrix;
  224. for(i=0; i<=nCoeffs; i++) {
  225. int j= s->intra_scantable.permutated[i];
  226. level = block[j];
  227. if (level) {
  228. if (level < 0) {
  229. level = -level;
  230. level = (((level << 1) + 1) * qscale *
  231. ((int) (quant_matrix[j]))) >> 4;
  232. level = -level;
  233. } else {
  234. level = (((level << 1) + 1) * qscale *
  235. ((int) (quant_matrix[j]))) >> 4;
  236. }
  237. block[j] = level;
  238. sum+=level;
  239. }
  240. }
  241. block[63]^=sum&1;
  242. }
  243. static void dct_unquantize_h263_intra_c(MpegEncContext *s,
  244. int16_t *block, int n, int qscale)
  245. {
  246. int i, level, qmul, qadd;
  247. int nCoeffs;
  248. assert(s->block_last_index[n]>=0);
  249. qmul = qscale << 1;
  250. if (!s->h263_aic) {
  251. if (n < 4)
  252. block[0] = block[0] * s->y_dc_scale;
  253. else
  254. block[0] = block[0] * s->c_dc_scale;
  255. qadd = (qscale - 1) | 1;
  256. }else{
  257. qadd = 0;
  258. }
  259. if(s->ac_pred)
  260. nCoeffs=63;
  261. else
  262. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  263. for(i=1; i<=nCoeffs; i++) {
  264. level = block[i];
  265. if (level) {
  266. if (level < 0) {
  267. level = level * qmul - qadd;
  268. } else {
  269. level = level * qmul + qadd;
  270. }
  271. block[i] = level;
  272. }
  273. }
  274. }
  275. static void dct_unquantize_h263_inter_c(MpegEncContext *s,
  276. int16_t *block, int n, int qscale)
  277. {
  278. int i, level, qmul, qadd;
  279. int nCoeffs;
  280. assert(s->block_last_index[n]>=0);
  281. qadd = (qscale - 1) | 1;
  282. qmul = qscale << 1;
  283. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  284. for(i=0; i<=nCoeffs; i++) {
  285. level = block[i];
  286. if (level) {
  287. if (level < 0) {
  288. level = level * qmul - qadd;
  289. } else {
  290. level = level * qmul + qadd;
  291. }
  292. block[i] = level;
  293. }
  294. }
  295. }
/**
 * Error-resilience callback: reconstruct one macroblock from the motion
 * data recovered by the error concealment code.
 *
 * Installed as ERContext.decode_mb in init_er(); @p opaque is the
 * MpegEncContext.  Only a single reference is supported (ref must be 0).
 */
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    /* Load the macroblock parameters ff_MPV_decode_mb() expects. */
    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    /* Block indices must be set up before reconstruction. */
    ff_init_block_index(s);
    ff_update_block_index(s);

    s->dsp.clear_blocks(s->block[0]);

    /* Point dest[] at the target 16x16 macroblock in the current frame;
     * chroma offsets are scaled down by the subsampling shifts. */
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    assert(ref == 0);

    ff_MPV_decode_mb(s, s->block);
}
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* Install the portable C dequantizers; the per-arch init calls below
     * may override them with optimized versions. */
    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* The bitexact variant adds MPEG-2 mismatch control on block[63]. */
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (ARCH_ARM)
        ff_MPV_common_init_arm(s);
    if (ARCH_PPC)
        ff_MPV_common_init_ppc(s);
    if (ARCH_X86)
        ff_MPV_common_init_x86(s);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
/**
 * Allocate the linesize-dependent scratch buffers.
 *
 * @param linesize luma line size of the frames that will be processed
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
                      fail)
    /* These are views into me.scratchpad, not separate allocations; only
     * edge_emu_buffer and me.scratchpad are ever freed. */
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
/**
 * Allocate a frame buffer for @p pic via the appropriate get_buffer path
 * and validate the resulting strides.
 *
 * @return 0 on success, a negative value on failure
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    /* Encoders render into a frame with an EDGE_WIDTH border on each side. */
    int edges_needed = av_codec_is_encoder(s->avctx->codec);
    int r, ret;

    pic->tf.f = &pic->f;
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2) {
        if (edges_needed) {
            /* Over-allocate so the edge area fits, then shift data[]
             * pointers past the border below. */
            pic->f.width  = s->avctx->width  + 2 * EDGE_WIDTH;
            pic->f.height = s->avctx->height + 2 * EDGE_WIDTH;
        }

        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        /* WM Image / Screen codecs use internal dimensions; bypass the
         * user get_buffer callback. */
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
        pic->f.format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
    }

    if (r < 0 || !pic->f.buf[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f.data[0]);
        return -1;
    }

    if (edges_needed) {
        int i;
        /* Advance each plane pointer past the top/left border (chroma
         * planes use the subsampled border size). */
        for (i = 0; pic->f.data[i]; i++) {
            int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                         pic->f.linesize[i] +
                         (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
            pic->f.data[i] += offset;
        }
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    /* Strides must stay constant over the whole sequence; the context's
     * scratch buffers and tables were sized for them. */
    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    /* First successful allocation also sizes the shared scratch buffers. */
    if (!s->edge_emu_buffer &&
        (ret = frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
        return ret;
    }

    return 0;
}
  444. void ff_free_picture_tables(Picture *pic)
  445. {
  446. int i;
  447. av_buffer_unref(&pic->mb_var_buf);
  448. av_buffer_unref(&pic->mc_mb_var_buf);
  449. av_buffer_unref(&pic->mb_mean_buf);
  450. av_buffer_unref(&pic->mbskip_table_buf);
  451. av_buffer_unref(&pic->qscale_table_buf);
  452. av_buffer_unref(&pic->mb_type_buf);
  453. for (i = 0; i < 2; i++) {
  454. av_buffer_unref(&pic->motion_val_buf[i]);
  455. av_buffer_unref(&pic->ref_index_buf[i]);
  456. }
  457. }
/**
 * Allocate the per-picture side-data tables (qscale, mb type, motion
 * vectors, and the encoder-only variance tables).
 *
 * @return 0 on success, AVERROR(ENOMEM) on failure (already-allocated
 *         buffers are left for the caller to release)
 */
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    /* One extra row plus one element: room for edge macroblock accesses. */
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;

    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    if (s->encoding) {
        /* Rate-control statistics, only needed when encoding. */
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        /* Two lists (forward/backward) of motion vectors + ref indices. */
        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    return 0;
}
/**
 * Ensure every side-data buffer of @p pic is writable (copy-on-write:
 * buffers shared with another picture get copied).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
    return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    int i, ret;

    if (shared) {
        /* Caller supplied the pixel data; only the tables are managed here. */
        assert(pic->f.data[0]);
        pic->shared = 1;
    } else {
        assert(!pic->f.buf[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    /* First use allocates the tables; reuse makes shared buffers writable. */
    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (s->encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    pic->mbskip_table = pic->mbskip_table_buf->data;
    /* Skip 2*mb_stride + 1 entries so neighbour accesses at the picture
     * edge stay inside the buffer — NOTE(review): inferred from the
     * over-allocation in alloc_picture_tables(); confirm against users. */
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    ff_free_picture_tables(pic);
    return AVERROR(ENOMEM);
}
/**
 * Deallocate a picture.
 */
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
    /* Byte offset of the first field after mb_mean: everything from there
     * on is plain state that can be zeroed wholesale below; the fields
     * before it (frame, buffer refs, table pointers) need explicit
     * release or must survive. */
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = &pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else
        av_frame_unref(&pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        ff_free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
/**
 * Make dst's side-data buffers reference src's (replacing any existing,
 * different buffers), then copy the derived data pointers.
 *
 * @return 0 on success, AVERROR(ENOMEM) on failure (dst's tables freed)
 */
static int update_picture_tables(Picture *dst, Picture *src)
{
     int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            ff_free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    /* The data pointers carry the offsets set up in ff_alloc_picture(),
     * so copy them rather than recomputing. */
    dst->mb_var        = src->mb_var;
    dst->mc_mb_var     = src->mc_mb_var;
    dst->mb_mean       = src->mb_mean;
    dst->mbskip_table  = src->mbskip_table;
    dst->qscale_table  = src->qscale_table;
    dst->mb_type       = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    return 0;
}
  611. int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
  612. {
  613. int ret;
  614. av_assert0(!dst->f.buf[0]);
  615. av_assert0(src->f.buf[0]);
  616. src->tf.f = &src->f;
  617. dst->tf.f = &dst->f;
  618. ret = ff_thread_ref_frame(&dst->tf, &src->tf);
  619. if (ret < 0)
  620. goto fail;
  621. ret = update_picture_tables(dst, src);
  622. if (ret < 0)
  623. goto fail;
  624. if (src->hwaccel_picture_private) {
  625. dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
  626. if (!dst->hwaccel_priv_buf)
  627. goto fail;
  628. dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
  629. }
  630. dst->field_picture = src->field_picture;
  631. dst->mb_var_sum = src->mb_var_sum;
  632. dst->mc_mb_var_sum = src->mc_mb_var_sum;
  633. dst->b_frame_score = src->b_frame_score;
  634. dst->needs_realloc = src->needs_realloc;
  635. dst->reference = src->reference;
  636. dst->shared = src->shared;
  637. return 0;
  638. fail:
  639. ff_mpeg_unref_picture(s, dst);
  640. return ret;
  641. }
  642. static void exchange_uv(MpegEncContext *s)
  643. {
  644. int16_t (*tmp)[64];
  645. tmp = s->pblocks[4];
  646. s->pblocks[4] = s->pblocks[5];
  647. s->pblocks[5] = tmp;
  648. }
/**
 * Allocate the per-slice-context buffers: DCT blocks, motion-estimation
 * maps (encoder only), and H.263 AC prediction values.  The
 * linesize-dependent scratch buffers are deliberately left NULL here and
 * allocated later by frame_size_alloc().
 *
 * @return 0 on success, -1 on allocation failure (buffers are freed later
 *         through ff_MPV_common_end())
 */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coefficients, double-buffered (hence the * 2). */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }
    if (s->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(s);

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        /* Offsets skip the first row/column so edge accesses stay valid. */
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
  690. static void free_duplicate_context(MpegEncContext *s)
  691. {
  692. if (s == NULL)
  693. return;
  694. av_freep(&s->edge_emu_buffer);
  695. av_freep(&s->me.scratchpad);
  696. s->me.temp =
  697. s->rd_scratchpad =
  698. s->b_scratchpad =
  699. s->obmc_scratchpad = NULL;
  700. av_freep(&s->dct_error_sum);
  701. av_freep(&s->me.map);
  702. av_freep(&s->me.score_map);
  703. av_freep(&s->blocks);
  704. av_freep(&s->ac_val_base);
  705. s->block = NULL;
  706. }
/**
 * Copy the per-slice fields (scratch pointers, bitwriter, ME state) from
 * @p src into @p bak.  Used by ff_update_duplicate_context() to preserve
 * these fields across a wholesale memcpy of the context.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
/**
 * Copy @p src into @p dst while keeping dst's own per-slice buffers and
 * state (saved and restored via backup_duplicate_context()).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not src's. */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(dst);
    if (!dst->edge_emu_buffer &&
        (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
  757. int ff_mpeg_update_thread_context(AVCodecContext *dst,
  758. const AVCodecContext *src)
  759. {
  760. int i, ret;
  761. MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
  762. if (dst == src || !s1->context_initialized)
  763. return 0;
  764. // FIXME can parameters change on I-frames?
  765. // in that case dst may need a reinit
  766. if (!s->context_initialized) {
  767. memcpy(s, s1, sizeof(MpegEncContext));
  768. s->avctx = dst;
  769. s->bitstream_buffer = NULL;
  770. s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
  771. ff_MPV_common_init(s);
  772. }
  773. if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
  774. int err;
  775. s->context_reinit = 0;
  776. s->height = s1->height;
  777. s->width = s1->width;
  778. if ((err = ff_MPV_common_frame_size_change(s)) < 0)
  779. return err;
  780. }
  781. s->avctx->coded_height = s1->avctx->coded_height;
  782. s->avctx->coded_width = s1->avctx->coded_width;
  783. s->avctx->width = s1->avctx->width;
  784. s->avctx->height = s1->avctx->height;
  785. s->coded_picture_number = s1->coded_picture_number;
  786. s->picture_number = s1->picture_number;
  787. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  788. ff_mpeg_unref_picture(s, &s->picture[i]);
  789. if (s1->picture[i].f.buf[0] &&
  790. (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
  791. return ret;
  792. }
  793. #define UPDATE_PICTURE(pic)\
  794. do {\
  795. ff_mpeg_unref_picture(s, &s->pic);\
  796. if (s1->pic.f.buf[0])\
  797. ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
  798. else\
  799. ret = update_picture_tables(&s->pic, &s1->pic);\
  800. if (ret < 0)\
  801. return ret;\
  802. } while (0)
  803. UPDATE_PICTURE(current_picture);
  804. UPDATE_PICTURE(last_picture);
  805. UPDATE_PICTURE(next_picture);
  806. s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
  807. s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
  808. s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
  809. // Error/bug resilience
  810. s->next_p_frame_damaged = s1->next_p_frame_damaged;
  811. s->workaround_bugs = s1->workaround_bugs;
  812. // MPEG4 timing info
  813. memcpy(&s->last_time_base, &s1->last_time_base,
  814. (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
  815. (char *) &s1->last_time_base);
  816. // B-frame info
  817. s->max_b_frames = s1->max_b_frames;
  818. s->low_delay = s1->low_delay;
  819. s->droppable = s1->droppable;
  820. // DivX handling (doesn't work)
  821. s->divx_packed = s1->divx_packed;
  822. if (s1->bitstream_buffer) {
  823. if (s1->bitstream_buffer_size +
  824. FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
  825. av_fast_malloc(&s->bitstream_buffer,
  826. &s->allocated_bitstream_buffer_size,
  827. s1->allocated_bitstream_buffer_size);
  828. s->bitstream_buffer_size = s1->bitstream_buffer_size;
  829. memcpy(s->bitstream_buffer, s1->bitstream_buffer,
  830. s1->bitstream_buffer_size);
  831. memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
  832. FF_INPUT_BUFFER_PADDING_SIZE);
  833. }
  834. // linesize dependend scratch buffer allocation
  835. if (!s->edge_emu_buffer)
  836. if (s1->linesize) {
  837. if (frame_size_alloc(s, s1->linesize) < 0) {
  838. av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
  839. "scratch buffers.\n");
  840. return AVERROR(ENOMEM);
  841. }
  842. } else {
  843. av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
  844. "be allocated due to unknown size.\n");
  845. return AVERROR_BUG;
  846. }
  847. // MPEG2/interlacing info
  848. memcpy(&s->progressive_sequence, &s1->progressive_sequence,
  849. (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
  850. if (!s1->first_field) {
  851. s->last_pict_type = s1->pict_type;
  852. if (s1->current_picture_ptr)
  853. s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
  854. }
  855. return 0;
  856. }
  857. /**
  858. * Set the given MpegEncContext to common defaults
  859. * (same for encoding and decoding).
  860. * The changed fields will not depend upon the
  861. * prior state of the MpegEncContext.
  862. */
  863. void ff_MPV_common_defaults(MpegEncContext *s)
  864. {
  865. s->y_dc_scale_table =
  866. s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
  867. s->chroma_qscale_table = ff_default_chroma_qscale_table;
  868. s->progressive_frame = 1;
  869. s->progressive_sequence = 1;
  870. s->picture_structure = PICT_FRAME;
  871. s->coded_picture_number = 0;
  872. s->picture_number = 0;
  873. s->f_code = 1;
  874. s->b_code = 1;
  875. s->slice_context_count = 1;
  876. }
/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    /* Decoding currently needs no defaults beyond the common ones. */
    ff_MPV_common_defaults(s);
}
/**
 * Initialize the error-resilience context from the current state of s.
 * Allocates the per-macroblock status/temp buffers; every other field is a
 * plain copy of (or a pointer into) the corresponding MpegEncContext data,
 * so s must already have its geometry and tables set up.
 *
 * @return 0 on success, AVERROR(ENOMEM) if a buffer allocation fails
 */
static int init_er(MpegEncContext *s)
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx       = s->avctx;
    er->dsp         = &s->dsp;
    er->mb_index2xy = s->mb_index2xy;
    er->mb_num      = s->mb_num;
    er->mb_width    = s->mb_width;
    er->mb_height   = s->mb_height;
    er->mb_stride   = s->mb_stride;
    er->b8_stride   = s->b8_stride;

    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    er->mbskip_table  = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    er->decode_mb = mpeg_er_decode_mb;
    er->opaque    = s;

    return 0;
fail:
    /* free whichever of the two buffers did get allocated */
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
}
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 * Called from ff_MPV_common_init() and again from
 * ff_MPV_common_frame_size_change() after free_context_frame().
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* macroblock grid geometry; each stride has one extra column so that
     * edge macroblocks have valid neighbours to the left */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    /* block_wrap maps block index (4 luma + 2 chroma) to its row stride */
    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    /* sizes of the DC/AC prediction planes (luma in 8x8 units, chroma per MB) */
    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables; the +1 offset below centers the usable area
         * inside the padded allocation */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                   s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                   s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                          sizeof(uint16_t), fail); // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                          sizeof(int), fail);

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);
    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                  mv_table_size * 2 * sizeof(int16_t), fail);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                            + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                              mb_array_size * 2 * sizeof(uint8_t), fail);
        }
    }

    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                          mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                          mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the neutral DC predictor (128 << 3) */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker mpeg4 slice_end detection
    // (allocation actually pads by 2 — TODO confirm which is required)

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}
  1036. /**
  1037. * init common structure for both encoder and decoder.
  1038. * this assumes that some variables like width/height are already set
  1039. */
  1040. av_cold int ff_MPV_common_init(MpegEncContext *s)
  1041. {
  1042. int i;
  1043. int nb_slices = (HAVE_THREADS &&
  1044. s->avctx->active_thread_type & FF_THREAD_SLICE) ?
  1045. s->avctx->thread_count : 1;
  1046. if (s->encoding && s->avctx->slices)
  1047. nb_slices = s->avctx->slices;
  1048. if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
  1049. s->mb_height = (s->height + 31) / 32 * 2;
  1050. else
  1051. s->mb_height = (s->height + 15) / 16;
  1052. if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
  1053. av_log(s->avctx, AV_LOG_ERROR,
  1054. "decoding to AV_PIX_FMT_NONE is not supported.\n");
  1055. return -1;
  1056. }
  1057. if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
  1058. int max_slices;
  1059. if (s->mb_height)
  1060. max_slices = FFMIN(MAX_THREADS, s->mb_height);
  1061. else
  1062. max_slices = MAX_THREADS;
  1063. av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
  1064. " reducing to %d\n", nb_slices, max_slices);
  1065. nb_slices = max_slices;
  1066. }
  1067. if ((s->width || s->height) &&
  1068. av_image_check_size(s->width, s->height, 0, s->avctx))
  1069. return -1;
  1070. ff_dct_common_init(s);
  1071. s->flags = s->avctx->flags;
  1072. s->flags2 = s->avctx->flags2;
  1073. /* set chroma shifts */
  1074. av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
  1075. &s->chroma_x_shift,
  1076. &s->chroma_y_shift);
  1077. /* convert fourcc to upper case */
  1078. s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
  1079. s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
  1080. FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
  1081. MAX_PICTURE_COUNT * sizeof(Picture), fail);
  1082. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1083. av_frame_unref(&s->picture[i].f);
  1084. }
  1085. memset(&s->next_picture, 0, sizeof(s->next_picture));
  1086. memset(&s->last_picture, 0, sizeof(s->last_picture));
  1087. memset(&s->current_picture, 0, sizeof(s->current_picture));
  1088. av_frame_unref(&s->next_picture.f);
  1089. av_frame_unref(&s->last_picture.f);
  1090. av_frame_unref(&s->current_picture.f);
  1091. if (s->width && s->height) {
  1092. if (init_context_frame(s))
  1093. goto fail;
  1094. s->parse_context.state = -1;
  1095. }
  1096. s->context_initialized = 1;
  1097. s->thread_context[0] = s;
  1098. if (s->width && s->height) {
  1099. if (nb_slices > 1) {
  1100. for (i = 1; i < nb_slices; i++) {
  1101. s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
  1102. memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
  1103. }
  1104. for (i = 0; i < nb_slices; i++) {
  1105. if (init_duplicate_context(s->thread_context[i]) < 0)
  1106. goto fail;
  1107. s->thread_context[i]->start_mb_y =
  1108. (s->mb_height * (i) + nb_slices / 2) / nb_slices;
  1109. s->thread_context[i]->end_mb_y =
  1110. (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
  1111. }
  1112. } else {
  1113. if (init_duplicate_context(s) < 0)
  1114. goto fail;
  1115. s->start_mb_y = 0;
  1116. s->end_mb_y = s->mb_height;
  1117. }
  1118. s->slice_context_count = nb_slices;
  1119. }
  1120. return 0;
  1121. fail:
  1122. ff_MPV_common_end(s);
  1123. return -1;
  1124. }
/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec. Inverse of init_context_frame(); safe to call on a partially
 * initialized context because av_freep() tolerates NULL.
 */
static int free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    /* encoder MV tables */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the non-base pointers are offsets into the freed bases — clear them */
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;

    /* interlaced direct-mode tables */
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    /* error-resilience buffers allocated by init_er() */
    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;

    return 0;
}
  1173. int ff_MPV_common_frame_size_change(MpegEncContext *s)
  1174. {
  1175. int i, err = 0;
  1176. if (s->slice_context_count > 1) {
  1177. for (i = 0; i < s->slice_context_count; i++) {
  1178. free_duplicate_context(s->thread_context[i]);
  1179. }
  1180. for (i = 1; i < s->slice_context_count; i++) {
  1181. av_freep(&s->thread_context[i]);
  1182. }
  1183. } else
  1184. free_duplicate_context(s);
  1185. if ((err = free_context_frame(s)) < 0)
  1186. return err;
  1187. if (s->picture)
  1188. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1189. s->picture[i].needs_realloc = 1;
  1190. }
  1191. s->last_picture_ptr =
  1192. s->next_picture_ptr =
  1193. s->current_picture_ptr = NULL;
  1194. // init
  1195. if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
  1196. s->mb_height = (s->height + 31) / 32 * 2;
  1197. else
  1198. s->mb_height = (s->height + 15) / 16;
  1199. if ((s->width || s->height) &&
  1200. av_image_check_size(s->width, s->height, 0, s->avctx))
  1201. return AVERROR_INVALIDDATA;
  1202. if ((err = init_context_frame(s)))
  1203. goto fail;
  1204. s->thread_context[0] = s;
  1205. if (s->width && s->height) {
  1206. int nb_slices = s->slice_context_count;
  1207. if (nb_slices > 1) {
  1208. for (i = 1; i < nb_slices; i++) {
  1209. s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
  1210. memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
  1211. }
  1212. for (i = 0; i < nb_slices; i++) {
  1213. if (init_duplicate_context(s->thread_context[i]) < 0)
  1214. goto fail;
  1215. s->thread_context[i]->start_mb_y =
  1216. (s->mb_height * (i) + nb_slices / 2) / nb_slices;
  1217. s->thread_context[i]->end_mb_y =
  1218. (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
  1219. }
  1220. } else {
  1221. if (init_duplicate_context(s) < 0)
  1222. goto fail;
  1223. s->start_mb_y = 0;
  1224. s->end_mb_y = s->mb_height;
  1225. }
  1226. s->slice_context_count = nb_slices;
  1227. }
  1228. return 0;
  1229. fail:
  1230. ff_MPV_common_end(s);
  1231. return err;
  1232. }
/* Free all resources of the common structure, for both encoder and decoder.
 * (Counterpart of ff_MPV_common_init(); safe on a partially set-up context.) */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    /* free the per-slice duplicate contexts (index 0 is s itself) */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    /* release every picture slot before freeing the array itself */
    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s, &s->current_picture);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);

    free_context_frame(s);

    s->context_initialized = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}
/**
 * Build the run-length helper tables (max_level[], max_run[], index_run[])
 * for both the "not last" and "last" coefficient groups of an RLTable.
 *
 * @param rl           table to initialize; table_run/table_level/last/n
 *                     must already be set
 * @param static_store if non-NULL, the three tables are laid out inside this
 *                     static buffer (and the call is a no-op if already done);
 *                     if NULL they are heap-allocated.
 *                     NOTE(review): the av_malloc() results in the dynamic
 *                     path are not checked before memcpy() — OOM would crash;
 *                     the function is void so it cannot report the failure.
 */
av_cold void ff_init_rl(RLTable *rl,
                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* group 0: codes before rl->last ("not last" coefficients);
         * group 1: codes from rl->last to rl->n ("last" coefficients) */
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);   /* rl->n marks "no code" */
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)   /* first code with this run */
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
  1318. av_cold void ff_init_vlc_rl(RLTable *rl)
  1319. {
  1320. int i, q;
  1321. for (q = 0; q < 32; q++) {
  1322. int qmul = q * 2;
  1323. int qadd = (q - 1) | 1;
  1324. if (q == 0) {
  1325. qmul = 1;
  1326. qadd = 0;
  1327. }
  1328. for (i = 0; i < rl->vlc.table_size; i++) {
  1329. int code = rl->vlc.table[i][0];
  1330. int len = rl->vlc.table[i][1];
  1331. int level, run;
  1332. if (len == 0) { // illegal code
  1333. run = 66;
  1334. level = MAX_LEVEL;
  1335. } else if (len < 0) { // more bits needed
  1336. run = 0;
  1337. level = code;
  1338. } else {
  1339. if (code == rl->n) { // esc
  1340. run = 66;
  1341. level = 0;
  1342. } else {
  1343. run = rl->table_run[code] + 1;
  1344. level = rl->table_level[code] * qmul + qadd;
  1345. if (code >= rl->last) run += 192;
  1346. }
  1347. }
  1348. rl->rl_vlc[q][i].len = len;
  1349. rl->rl_vlc[q][i].level = level;
  1350. rl->rl_vlc[q][i].run = run;
  1351. }
  1352. }
  1353. }
  1354. static void release_unused_pictures(MpegEncContext *s)
  1355. {
  1356. int i;
  1357. /* release non reference frames */
  1358. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1359. if (!s->picture[i].reference)
  1360. ff_mpeg_unref_picture(s, &s->picture[i]);
  1361. }
  1362. }
  1363. static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
  1364. {
  1365. if (pic->f.buf[0] == NULL)
  1366. return 1;
  1367. if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
  1368. return 1;
  1369. return 0;
  1370. }
  1371. static int find_unused_picture(MpegEncContext *s, int shared)
  1372. {
  1373. int i;
  1374. if (shared) {
  1375. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1376. if (s->picture[i].f.buf[0] == NULL)
  1377. return i;
  1378. }
  1379. } else {
  1380. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1381. if (pic_is_unused(s, &s->picture[i]))
  1382. return i;
  1383. }
  1384. }
  1385. return AVERROR_INVALIDDATA;
  1386. }
  1387. int ff_find_unused_picture(MpegEncContext *s, int shared)
  1388. {
  1389. int ret = find_unused_picture(s, shared);
  1390. if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
  1391. if (s->picture[ret].needs_realloc) {
  1392. s->picture[ret].needs_realloc = 0;
  1393. ff_free_picture_tables(&s->picture[ret]);
  1394. ff_mpeg_unref_picture(s, &s->picture[ret]);
  1395. }
  1396. }
  1397. return ret;
  1398. }
/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 *
 * Selects/allocates the current picture, rotates the last/next reference
 * pointers, allocates dummy references when the stream starts on a
 * non-keyframe, and picks the dequantizer for the codec in use.
 *
 * @return 0 on success, a negative value on error
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f.buf[0]) {
        ff_mpeg_unref_picture(s, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (mpeg124/h263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            /* with frame threading such leftovers are expected, so only
             * complain in the single-threaded case */
            if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                av_log(avctx, AV_LOG_ERROR,
                       "releasing zombie picture\n");
            ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s, &s->current_picture);

    release_unused_pictures(s);

    if (s->current_picture_ptr &&
        s->current_picture_ptr->f.buf[0] == NULL) {
        // we already have a unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        pic = &s->picture[i];
    }

    /* B-frames and droppable frames are never used as references */
    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

    pic->f.coded_picture_number = s->coded_picture_number++;

    if (ff_alloc_picture(s, pic, 0) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f.top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f.top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                 !s->progressive_sequence;
    s->current_picture_ptr->field_picture      = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* rotate references: last <- next <- current (unless droppable) */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    /* stream starts on a non-keyframe (or a field-based keyframe):
     * allocate a black/neutral dummy last picture to predict from */
    if ((s->last_picture_ptr == NULL ||
         s->last_picture_ptr->f.buf[0] == NULL) &&
        (s->pict_type != AV_PICTURE_TYPE_I ||
         s->picture_structure != PICT_FRAME)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);

        if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");
        else if (s->picture_structure != PICT_FRAME)
            av_log(avctx, AV_LOG_INFO,
                   "allocate dummy last picture for field based first keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference   = 3;
        s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;

        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        /* black luma, neutral (0x80) chroma */
        memset(s->last_picture_ptr->f.data[0], 0,
               avctx->height * s->last_picture_ptr->f.linesize[0]);
        memset(s->last_picture_ptr->f.data[1], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[1]);
        memset(s->last_picture_ptr->f.data[2], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[2]);

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    /* B-frame without a forward reference: allocate a dummy next picture */
    if ((s->next_picture_ptr == NULL ||
         s->next_picture_ptr->f.buf[0] == NULL) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference   = 3;
        s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;

        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->last_picture);
        if (s->last_picture_ptr->f.buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->next_picture);
        if (s->next_picture_ptr->f.buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    if (s->pict_type != AV_PICTURE_TYPE_I &&
        !(s->last_picture_ptr && s->last_picture_ptr->f.buf[0])) {
        av_log(s, AV_LOG_ERROR,
               "Non-reference picture received and no reference available\n");
        return AVERROR_INVALIDDATA;
    }

    /* field pictures: point data at the right field and double the strides */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    return 0;
}
/* called after a frame has been decoded. */
void ff_MPV_frame_end(MpegEncContext *s)
{
#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */
    /* NOTE: the dangling `else` above is deliberate — with XVMC enabled,
     * emms_c() is the else branch and is skipped for XVMC frames. */
    emms_c();

    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
/**
 * Print debugging info for the given picture.
 * Emits per-frame type plus (depending on avctx->debug flags) a per-MB
 * grid of skip counts, qscale values and MB-type/MV-direction glyphs.
 */
void ff_print_debug_info(MpegEncContext *s, Picture *p)
{
    AVFrame *pict;

    /* nothing useful to print for hw-accelerated frames or without MB info */
    if (s->avctx->hwaccel || !p || !p->mb_type)
        return;

    pict = &p->f;
    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:
            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
            break;
        case AV_PICTURE_TYPE_P:
            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
            break;
        case AV_PICTURE_TYPE_B:
            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
            break;
        case AV_PICTURE_TYPE_S:
            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
            break;
        case AV_PICTURE_TYPE_SI:
            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
            break;
        case AV_PICTURE_TYPE_SP:
            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
            break;
        }
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    /* consecutive-skip count, clamped to one digit */
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    if (count > 9)
                        count = 9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           p->qscale_table[x + y * s->mb_stride]);
                }
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = p->mb_type[x + y * s->mb_stride];
                    // Type & MV direction
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    // segmentation
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* '=' marks interlaced macroblocks */
                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }
}
  1699. /**
  1700. * find the lowest MB row referenced in the MVs
  1701. */
  1702. int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
  1703. {
  1704. int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
  1705. int my, off, i, mvs;
  1706. if (s->picture_structure != PICT_FRAME || s->mcsel)
  1707. goto unhandled;
  1708. switch (s->mv_type) {
  1709. case MV_TYPE_16X16:
  1710. mvs = 1;
  1711. break;
  1712. case MV_TYPE_16X8:
  1713. mvs = 2;
  1714. break;
  1715. case MV_TYPE_8X8:
  1716. mvs = 4;
  1717. break;
  1718. default:
  1719. goto unhandled;
  1720. }
  1721. for (i = 0; i < mvs; i++) {
  1722. my = s->mv[dir][i][1]<<qpel_shift;
  1723. my_max = FFMAX(my_max, my);
  1724. my_min = FFMIN(my_min, my);
  1725. }
  1726. off = (FFMAX(-my_min, my_max) + 63) >> 6;
  1727. return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
  1728. unhandled:
  1729. return s->mb_height-1;
  1730. }
  1731. /* put block[] to dest[] */
  1732. static inline void put_dct(MpegEncContext *s,
  1733. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  1734. {
  1735. s->dct_unquantize_intra(s, block, i, qscale);
  1736. s->dsp.idct_put (dest, line_size, block);
  1737. }
  1738. /* add block[] to dest[] */
  1739. static inline void add_dct(MpegEncContext *s,
  1740. int16_t *block, int i, uint8_t *dest, int line_size)
  1741. {
  1742. if (s->block_last_index[i] >= 0) {
  1743. s->dsp.idct_add (dest, line_size, block);
  1744. }
  1745. }
  1746. static inline void add_dequant_dct(MpegEncContext *s,
  1747. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  1748. {
  1749. if (s->block_last_index[i] >= 0) {
  1750. s->dct_unquantize_inter(s, block, i, qscale);
  1751. s->dsp.idct_add (dest, line_size, block);
  1752. }
  1753. }
  1754. /**
  1755. * Clean dc, ac, coded_block for the current non-intra MB.
  1756. */
  1757. void ff_clean_intra_table_entries(MpegEncContext *s)
  1758. {
  1759. int wrap = s->b8_stride;
  1760. int xy = s->block_index[0];
  1761. s->dc_val[0][xy ] =
  1762. s->dc_val[0][xy + 1 ] =
  1763. s->dc_val[0][xy + wrap] =
  1764. s->dc_val[0][xy + 1 + wrap] = 1024;
  1765. /* ac pred */
  1766. memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
  1767. memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
  1768. if (s->msmpeg4_version>=3) {
  1769. s->coded_block[xy ] =
  1770. s->coded_block[xy + 1 ] =
  1771. s->coded_block[xy + wrap] =
  1772. s->coded_block[xy + 1 + wrap] = 0;
  1773. }
  1774. /* chroma */
  1775. wrap = s->mb_stride;
  1776. xy = s->mb_x + s->mb_y * wrap;
  1777. s->dc_val[1][xy] =
  1778. s->dc_val[2][xy] = 1024;
  1779. /* ac pred */
  1780. memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
  1781. memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
  1782. s->mbintra_table[xy]= 0;
  1783. }
/* Generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   It performs motion compensation (decoding only), adds/puts the IDCT of
   the residual, and maintains the skip table and DC/AC predictor state.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)

   is_mpeg12 is a compile-time constant at the call sites so the compiler
   can specialize the two variants (see ff_MPV_decode_mb). */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    /* hardware acceleration renders the MB itself; nothing to do here */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients (in natural order, hence the inverse
         * idct_permutation lookup) */
        int i,j;
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for(i=0; i<6; i++){
            for(j=0; j<64; j++){
                av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            /* only clean when the previous MB at this position was intra */
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* Reconstruct pixels unless we are encoding and the reconstruction is
     * provably unneeded (intra-only / B frames without RD decision and
     * without PSNR computation). */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
        const int block_size = 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                /* non-reference frame: nobody copies from it, mark skipped */
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* for interlaced DCT the 8x8 blocks interleave by field lines */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y = s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* write into a scratchpad first; copied out after skip_idct */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){
                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* wait until the referenced rows of the reference
                     * frames have been decoded by the other thread */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->hdsp.put_pixels_tab;
                }else{
                    op_pix = s->hdsp.put_no_rnd_pixels_tab;
                }
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                    /* bidirectional MBs average the backward prediction in */
                    op_pix = s->hdsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            /* codecs in the first branch still need dequantization here;
             * the others have already-dequantized coefficients in block[] */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        /* 4:2:0: one 8x8 chroma block per plane */
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2: two stacked chroma blocks per plane */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8],   8, dest_cb+8,            dct_linesize);
                            add_dct(s, block[9],   9, dest_cr+8,            dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                /* WMV2 has its own per-MB residual handling */
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* coefficients are already dequantized: plain IDCT put */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* if we rendered into the scratchpad, copy the MB to the picture */
        if(!readable){
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
/* Public entry point for MB reconstruction. Unless building for size,
 * the MPEG-1/2 path is compile-time specialized (is_mpeg12 = 1). */
void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1) {
        MPV_decode_mb_internal(s, block, 1);
    } else
#endif
        MPV_decode_mb_internal(s, block, 0);
}
/* Notify the application (draw_horiz_band callback) that the horizontal
 * band [y, y+h) of the current picture is ready for display. */
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
{
    ff_draw_horiz_band(s->avctx, &s->current_picture.f,
                       &s->last_picture.f, y, h, s->picture_structure,
                       s->first_field, s->low_delay);
}
/* Set up s->block_index[] and s->dest[] for the macroblock at
 * (s->mb_x, s->mb_y); called before each MB is processed. Note both are
 * biased one MB to the left (mb_x - 1 / the -2/-1 offsets). */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4;   // log2 of the 16-pixel macroblock size

    /* luma 8x8 block indices: two blocks per row on the b8 grid, two rows */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma indices follow after the whole luma b8 area
     * (s->b8_stride * s->mb_height * 2 entries) on the MB grid */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    /* for B frames with draw_horiz_band in frame mode the vertical offset
     * is handled elsewhere — NOTE(review): presumably by the band drawing
     * path; confirm against callers */
    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: MB rows alternate between the two fields */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
  2048. /**
  2049. * Permute an 8x8 block.
  2050. * @param block the block which will be permuted according to the given permutation vector
  2051. * @param permutation the permutation vector
  2052. * @param last the last non zero coefficient in scantable order, used to speed the permutation up
  2053. * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
  2054. * (inverse) permutated to scantable order!
  2055. */
  2056. void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
  2057. {
  2058. int i;
  2059. int16_t temp[64];
  2060. if(last<=0) return;
  2061. //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
  2062. for(i=0; i<=last; i++){
  2063. const int j= scantable[i];
  2064. temp[j]= block[j];
  2065. block[j]=0;
  2066. }
  2067. for(i=0; i<=last; i++){
  2068. const int j= scantable[i];
  2069. const int perm_j= permutation[j];
  2070. block[perm_j]= temp[j];
  2071. }
  2072. }
/* Flush all decoder state: unreference every buffered picture and reset
 * the bitstream parser, e.g. after a seek. Safe to call before init. */
void ff_mpeg_flush(AVCodecContext *avctx){
    int i;
    MpegEncContext *s = avctx->priv_data;

    if(s==NULL || s->picture==NULL)
        return;

    for (i = 0; i < MAX_PICTURE_COUNT; i++)
        ff_mpeg_unref_picture(s, &s->picture[i]);
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    ff_mpeg_unref_picture(s, &s->current_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);

    s->mb_x= s->mb_y= 0;

    /* reset the parser so no stale bitstream data survives the flush */
    s->parse_context.state= -1;
    s->parse_context.frame_start_found= 0;
    s->parse_context.overread= 0;
    s->parse_context.overread_index= 0;
    s->parse_context.index= 0;
    s->parse_context.last_index= 0;
    s->bitstream_buffer_size=0;
    s->pp_time=0;
}
  2094. /**
  2095. * set qscale and update qscale dependent variables.
  2096. */
  2097. void ff_set_qscale(MpegEncContext * s, int qscale)
  2098. {
  2099. if (qscale < 1)
  2100. qscale = 1;
  2101. else if (qscale > 31)
  2102. qscale = 31;
  2103. s->qscale = qscale;
  2104. s->chroma_qscale= s->chroma_qscale_table[qscale];
  2105. s->y_dc_scale= s->y_dc_scale_table[ qscale ];
  2106. s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
  2107. }
/* Report decoding progress (the current MB row) to waiting frame threads;
 * not reported for B frames, partitioned frames, or after a decode error. */
void ff_MPV_report_decode_progress(MpegEncContext *s)
{
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
        ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
}
  2113. #if CONFIG_ERROR_RESILIENCE
  2114. void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
  2115. {
  2116. int i;
  2117. if (!src)
  2118. return;
  2119. dst->f = &src->f;
  2120. dst->tf = &src->tf;
  2121. for (i = 0; i < 2; i++) {
  2122. dst->motion_val[i] = src->motion_val[i];
  2123. dst->ref_index[i] = src->ref_index[i];
  2124. }
  2125. dst->mb_type = src->mb_type;
  2126. dst->field_picture = src->field_picture;
  2127. }
/* Copy the per-frame state the error-resilience code needs and start
 * error tracking for the current frame. */
void ff_mpeg_er_frame_start(MpegEncContext *s)
{
    ERContext *er = &s->er;

    ff_mpeg_set_erpic(&er->cur_pic,  s->current_picture_ptr);
    ff_mpeg_set_erpic(&er->next_pic, s->next_picture_ptr);
    ff_mpeg_set_erpic(&er->last_pic, s->last_picture_ptr);

    er->pp_time           = s->pp_time;
    er->pb_time           = s->pb_time;
    er->quarter_sample    = s->quarter_sample;
    er->partitioned_frame = s->partitioned_frame;

    ff_er_frame_start(er);
}
  2140. #endif /* CONFIG_ERROR_RESILIENCE */