/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/timer.h"
#include "avcodec.h"
#include "dsputil.h"
#include "h264chroma.h"
#include "internal.h"
#include "mathops.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "thread.h"
#include <limits.h>

static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};

const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};

static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};

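/**
 * Dequantize an intra block as specified by MPEG-1:
 * block[j] = (level * qscale * quant_matrix[j]) >> 3, with the result
 * forced to an odd value (the "(level - 1) | 1" oddification step) as
 * mismatch control. The DC coefficient is scaled separately by the
 * luma/chroma DC scale. Worked example: level = 2, qscale = 8,
 * quant_matrix[j] = 16 gives (2 * 8 * 16) >> 3 = 32, oddified to 31.
 */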
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only mpeg1 */
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

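/**
 * Dequantize an inter block as specified by MPEG-1:
 * block[j] = ((2 * level + 1) * qscale * quant_matrix[j]) >> 4, again
 * oddified for mismatch control. Worked example: level = 2, qscale = 8,
 * quant_matrix[j] = 16 gives ((5 * 8 * 16) >> 4) = 40, oddified to 39.
 */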
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

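/**
 * Dequantize an intra block as specified by MPEG-2. Unlike the MPEG-1
 * variant there is no per-coefficient oddification; mismatch control is
 * only applied in the bitexact variant below. With alternate scan all
 * 64 coefficients are processed instead of stopping at block_last_index.
 */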
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
        }
    }
}

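/**
 * Bit-exact MPEG-2 intra dequantization with mismatch control: the
 * signed sum of all output coefficients is accumulated (starting at -1)
 * and the LSB of block[63] is toggled so that the coefficient sum ends
 * up odd, as required by the MPEG-2 specification.
 */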
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
            sum+=level;
        }
    }
    block[63]^=sum&1;
}

static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    block[63]^=sum&1;
}

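/**
 * Dequantize an intra block as specified by H.263:
 * block[i] = level * (2 * qscale) +/- qadd, with qadd = (qscale - 1) | 1.
 * The offset is dropped when advanced intra coding (h263_aic) is in use.
 * Worked example: qscale = 5 gives qmul = 10, qadd = 5, so level = 3
 * reconstructs to 35.
 */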
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    }else{
        qadd = 0;
    }
    if(s->ac_pred)
        nCoeffs=63;
    else
        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=1; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=0; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

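/**
 * Callback for the error concealment code (ERContext.decode_mb): loads
 * the passed macroblock parameters and motion vectors into the context,
 * recomputes the destination pointers and re-decodes the macroblock.
 */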
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->dsp.clear_blocks(s->block[0]);

    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    if (ref)
        av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
    ff_MPV_decode_mb(s, s->block);
}

/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (ARCH_ALPHA)
        ff_MPV_common_init_axp(s);
    if (ARCH_ARM)
        ff_MPV_common_init_arm(s);
    if (ARCH_PPC)
        ff_MPV_common_init_ppc(s);
    if (ARCH_X86)
        ff_MPV_common_init_x86(s);

    /* load & permute scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}

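/**
 * Allocate the linesize-dependent scratch buffers: the edge emulation
 * buffer and one shared scratchpad that the ME/RD/B/OBMC pointers all
 * alias. Sizing follows the worst cases noted in the comments inside.
 */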
static int frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    // VC1 computes luma and chroma simultaneously and needs 19x19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
                      fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}

/**
 * Allocate a frame buffer
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int edges_needed = av_codec_is_encoder(s->avctx->codec);
    int r, ret;

    pic->tf.f = &pic->f;
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2) {
        if (edges_needed) {
            pic->f.width  = s->avctx->width  + 2 * EDGE_WIDTH;
            pic->f.height = s->avctx->height + 2 * EDGE_WIDTH;
        }

        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
        pic->f.format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
    }

    if (r < 0 || !pic->f.buf[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f.data[0]);
        return -1;
    }

    if (edges_needed) {
        int i;
        for (i = 0; pic->f.data[i]; i++) {
            int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                         pic->f.linesize[i] +
                         (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
            pic->f.data[i] += offset;
        }
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (!s->edge_emu_buffer &&
        (ret = frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
        return ret;
    }

    return 0;
}

void ff_free_picture_tables(Picture *pic)
{
    int i;

    pic->alloc_mb_width  =
    pic->alloc_mb_height = 0;

    av_buffer_unref(&pic->mb_var_buf);
    av_buffer_unref(&pic->mc_mb_var_buf);
    av_buffer_unref(&pic->mb_mean_buf);
    av_buffer_unref(&pic->mbskip_table_buf);
    av_buffer_unref(&pic->qscale_table_buf);
    av_buffer_unref(&pic->mb_type_buf);

    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
    }
}

static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;

    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    if (s->encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    pic->alloc_mb_width  = s->mb_width;
    pic->alloc_mb_height = s->mb_height;

    return 0;
}

static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
        (ret = av_buffer_make_writable(&pic->table)) < 0)\
        return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}

/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    int i, ret;

    if (pic->qscale_table_buf)
        if (   pic->alloc_mb_width  != s->mb_width
            || pic->alloc_mb_height != s->mb_height)
            ff_free_picture_tables(pic);

    if (shared) {
        av_assert0(pic->f.data[0]);
        pic->shared = 1;
    } else {
        av_assert0(!pic->f.buf[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (s->encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    pic->mbskip_table = pic->mbskip_table_buf->data;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    ff_free_picture_tables(pic);
    return AVERROR(ENOMEM);
}

/**
 * Deallocate a picture.
 */
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = &pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else
        av_frame_unref(&pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        ff_free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}

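/**
 * Make dst's table buffer references point at src's buffers (taking new
 * references where the underlying buffers differ) and copy the derived
 * table pointers. On reference failure all of dst's tables are freed.
 */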
static int update_picture_tables(Picture *dst, Picture *src)
{
    int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            ff_free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    dst->mb_var       = src->mb_var;
    dst->mc_mb_var    = src->mc_mb_var;
    dst->mb_mean      = src->mb_mean;
    dst->mbskip_table = src->mbskip_table;
    dst->qscale_table = src->qscale_table;
    dst->mb_type      = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    dst->alloc_mb_width  = src->alloc_mb_width;
    dst->alloc_mb_height = src->alloc_mb_height;

    return 0;
}

int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
{
    int ret;

    av_assert0(!dst->f.buf[0]);
    av_assert0(src->f.buf[0]);

    src->tf.f = &src->f;
    dst->tf.f = &dst->f;
    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        goto fail;

    ret = update_picture_tables(dst, src);
    if (ret < 0)
        goto fail;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    dst->field_picture = src->field_picture;
    dst->mb_var_sum    = src->mb_var_sum;
    dst->mc_mb_var_sum = src->mc_mb_var_sum;
    dst->b_frame_score = src->b_frame_score;
    dst->needs_realloc = src->needs_realloc;
    dst->reference     = src->reference;
    dst->shared        = src->shared;

    return 0;
fail:
    ff_mpeg_unref_picture(s, dst);
    return ret;
}

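/**
 * Swap the U and V block pointers; the "VCR2" codec tag appears to code
 * the chroma blocks in the opposite order, so decoding it through the
 * normal path requires this exchange.
 */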
static void exchange_uv(MpegEncContext *s)
{
    int16_t (*tmp)[64];

    tmp           = s->pblocks[4];
    s->pblocks[4] = s->pblocks[5];
    s->pblocks[5] = tmp;
}

static int init_duplicate_context(MpegEncContext *s)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }
    if (s->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(s);

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}

static void free_duplicate_context(MpegEncContext *s)
{
    if (s == NULL)
        return;

    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}

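/**
 * Save the per-thread pointers and state that must survive a wholesale
 * memcpy() of the context; ff_update_duplicate_context() below copies
 * the whole source context and then restores these fields.
 */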
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}

int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(dst);
    if (!dst->edge_emu_buffer &&
        (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for 1000 frames on 1GHz with 2 threads
    return 0;
}

int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized){
//             s->picture_range_start += MAX_PICTURE_COUNT;
//             s->picture_range_end   += MAX_PICTURE_COUNT;
            if((ret = ff_MPV_common_init(s)) < 0){
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return ret;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    if(s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_mpeg_unref_picture(s, &s->picture[i]);
            if (s1->picture[i].f.buf[0] &&
                (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
                return ret;
        }

#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s, &s->pic);\
    if (s1->pic.f.buf[0])\
        ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
    else\
        ret = update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG4 timing info
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize-dependent scratch buffer allocation
    if (!s->edge_emu_buffer)
        if (s1->linesize) {
            if (frame_size_alloc(s, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
    }

    return 0;
}

/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_MPV_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table      =
    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
    s->progressive_frame     = 1;
    s->progressive_sequence  = 1;
    s->picture_structure     = PICT_FRAME;
    s->coded_picture_number  = 0;
    s->picture_number        = 0;

    s->f_code                = 1;
    s->b_code                = 1;

    s->slice_context_count   = 1;
}

/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    ff_MPV_common_defaults(s);
}

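/**
 * Initialize the error resilience context from the current state of the
 * MpegEncContext and allocate its temporary buffers.
 */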
static int init_er(MpegEncContext *s)
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx       = s->avctx;
    er->dsp         = &s->dsp;

    er->mb_index2xy = s->mb_index2xy;
    er->mb_num      = s->mb_num;
    er->mb_width    = s->mb_width;
    er->mb_height   = s->mb_height;
    er->mb_stride   = s->mb_stride;
    er->b8_stride   = s->b8_stride;

    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    er->mbskip_table  = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    er->decode_mb = mpeg_er_decode_mb;
    er->opaque    = s;

    return 0;
fail:
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
}

/**
 * Initialize and allocate MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);
    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker mpeg4 slice_end detection

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}

/**
 * Init common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 */
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
    int i;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
                                  &s->chroma_x_shift,
                                  &s->chroma_y_shift);

    /* convert fourcc to upper case */
    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        av_frame_unref(&s->picture[i].f);
    }
    memset(&s->next_picture, 0, sizeof(s->next_picture));
    memset(&s->last_picture, 0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    av_frame_unref(&s->next_picture.f);
    av_frame_unref(&s->last_picture.f);
    av_frame_unref(&s->current_picture.f);

    if (init_context_frame(s))
        goto fail;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

//     if (s->width && s->height) {
    if (nb_slices > 1) {
        for (i = 1; i < nb_slices; i++) {
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        }

        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i]) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if (init_duplicate_context(s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;
//     }

    return 0;
 fail:
    ff_MPV_common_end(s);
    return -1;
}

/**
 * Free and reset MpegEncContext fields depending on the resolution.
 * Used during resolution changes to avoid a full reinitialization of the
 * codec.
 */
static int free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;

    return 0;
}

int ff_MPV_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else
        free_duplicate_context(s);

    if ((err = free_context_frame(s)) < 0)
        return err;

    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR_INVALIDDATA;

    if ((err = init_context_frame(s)))
        goto fail;

    s->thread_context[0] = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i]) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            err = init_duplicate_context(s);
            if (err < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
 fail:
    ff_MPV_common_end(s);
    return err;
}

/* init common structure for both encoder and decoder */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s, &s->current_picture);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);

    free_context_frame(s);

    s->context_initialized = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}

av_cold void ff_init_rl(RLTable *rl,
                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end   = rl->last;
        } else {
            start = rl->last;
            end   = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}

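/**
 * Precompute the per-qscale RL VLC tables: for every quantizer the
 * run/level pair of each VLC code is stored with the level already
 * dequantized (level * qmul + qadd), so the decoder can skip the
 * multiply in its inner loop. Escape/illegal codes are flagged with the
 * special run value 66, and "more bits needed" entries with run 0.
 */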
av_cold void ff_init_vlc_rl(RLTable *rl)
{
    int i, q;

    for (q = 0; q < 32; q++) {
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        if (q == 0) {
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len  = rl->vlc.table[i][1];
            int level, run;

            if (len == 0) { // illegal code
                run   = 66;
                level = MAX_LEVEL;
            } else if (len < 0) { // more bits needed
                run   = 0;
                level = code;
            } else {
                if (code == rl->n) { // esc
                    run   = 66;
                    level =  0;
                } else {
                    run   = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    if (code >= rl->last) run += 192;
                }
            }
            rl->rl_vlc[q][i].len   = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run   = run;
        }
    }
}

static void release_unused_pictures(MpegEncContext *s)
{
    int i;

    /* release non reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s, &s->picture[i]);
    }
}

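/**
 * Check whether a picture slot can be reused: the current last picture
 * is never considered unused, empty slots always are, and slots flagged
 * needs_realloc are reusable unless still referenced as a delayed pic.
 */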
static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
{
    if (pic == s->last_picture_ptr)
        return 0;
    if (pic->f.buf[0] == NULL)
        return 1;
    if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
        return 1;
    return 0;
}

static int find_unused_picture(MpegEncContext *s, int shared)
{
    int i;

    if (shared) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (s->picture[i].f.buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
                return i;
        }
    } else {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (pic_is_unused(s, &s->picture[i]))
                return i;
        }
    }

    av_log(s->avctx, AV_LOG_FATAL,
           "Internal error, picture buffer overflow\n");
    /* We could return -1, but the codec would crash trying to draw into a
     * non-existing frame anyway. This is safer than waiting for a random crash.
     * Also the return of this is never useful, an encoder must only allocate
     * as much as allowed in the specification. This has no relationship to how
     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
     * enough for such valid streams).
     * Plus, a decoder has to check stream validity and remove frames if too
     * many reference frames are around. Waiting for "OOM" is not correct at
     * all. Similarly, missing reference frames have to be replaced by
     * interpolated/MC frames, anything else is a bug in the codec ...
     */
    abort();
    return -1;
}

int ff_find_unused_picture(MpegEncContext *s, int shared)
{
    int ret = find_unused_picture(s, shared);

    if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
        if (s->picture[ret].needs_realloc) {
            s->picture[ret].needs_realloc = 0;
            ff_free_picture_tables(&s->picture[ret]);
            ff_mpeg_unref_picture(s, &s->picture[ret]);
        }
    }
    return ret;
}

  1408. /**
  1409. * generic function called after decoding
  1410. * the header and before a frame is decoded.
  1411. */
  1412. int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
  1413. {
  1414. int i, ret;
  1415. Picture *pic;
  1416. s->mb_skipped = 0;
  1417. if (!ff_thread_can_start_frame(avctx)) {
  1418. av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
  1419. return -1;
  1420. }
  1421. /* mark & release old frames */
  1422. if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
  1423. s->last_picture_ptr != s->next_picture_ptr &&
  1424. s->last_picture_ptr->f.buf[0]) {
  1425. ff_mpeg_unref_picture(s, s->last_picture_ptr);
  1426. }
  1427. /* release forgotten pictures */
  1428. /* if (mpeg124/h263) */
  1429. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1430. if (&s->picture[i] != s->last_picture_ptr &&
  1431. &s->picture[i] != s->next_picture_ptr &&
  1432. s->picture[i].reference && !s->picture[i].needs_realloc) {
  1433. if (!(avctx->active_thread_type & FF_THREAD_FRAME))
  1434. av_log(avctx, AV_LOG_ERROR,
  1435. "releasing zombie picture\n");
  1436. ff_mpeg_unref_picture(s, &s->picture[i]);
  1437. }
  1438. }
  1439. ff_mpeg_unref_picture(s, &s->current_picture);
  1440. release_unused_pictures(s);
  1441. if (s->current_picture_ptr &&
  1442. s->current_picture_ptr->f.buf[0] == NULL) {
  1443. // we already have a unused image
  1444. // (maybe it was set before reading the header)
  1445. pic = s->current_picture_ptr;
  1446. } else {
  1447. i = ff_find_unused_picture(s, 0);
  1448. if (i < 0) {
  1449. av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
  1450. return i;
  1451. }
  1452. pic = &s->picture[i];
  1453. }
  1454. pic->reference = 0;
  1455. if (!s->droppable) {
  1456. if (s->pict_type != AV_PICTURE_TYPE_B)
  1457. pic->reference = 3;
  1458. }
  1459. pic->f.coded_picture_number = s->coded_picture_number++;
  1460. if (ff_alloc_picture(s, pic, 0) < 0)
  1461. return -1;
  1462. s->current_picture_ptr = pic;
  1463. // FIXME use only the vars from current_pic
  1464. s->current_picture_ptr->f.top_field_first = s->top_field_first;
  1465. if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
  1466. s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
  1467. if (s->picture_structure != PICT_FRAME)
  1468. s->current_picture_ptr->f.top_field_first =
  1469. (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
  1470. }
  1471. s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
  1472. !s->progressive_sequence;
  1473. s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
  1474. s->current_picture_ptr->f.pict_type = s->pict_type;
  1475. // if (s->flags && CODEC_FLAG_QSCALE)
  1476. // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
  1477. s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
  1478. if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
  1479. s->current_picture_ptr)) < 0)
  1480. return ret;
  1481. if (s->pict_type != AV_PICTURE_TYPE_B) {
  1482. s->last_picture_ptr = s->next_picture_ptr;
  1483. if (!s->droppable)
  1484. s->next_picture_ptr = s->current_picture_ptr;
  1485. }
  1486. av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
  1487. s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
  1488. s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
  1489. s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
  1490. s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
  1491. s->pict_type, s->droppable);
  1492. if ((s->last_picture_ptr == NULL ||
  1493. s->last_picture_ptr->f.buf[0] == NULL) &&
  1494. (s->pict_type != AV_PICTURE_TYPE_I ||
  1495. s->picture_structure != PICT_FRAME)) {
  1496. int h_chroma_shift, v_chroma_shift;
  1497. av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
  1498. &h_chroma_shift, &v_chroma_shift);
  1499. if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.buf[0])
  1500. av_log(avctx, AV_LOG_DEBUG,
  1501. "allocating dummy last picture for B frame\n");
  1502. else if (s->pict_type != AV_PICTURE_TYPE_I)
  1503. av_log(avctx, AV_LOG_ERROR,
  1504. "warning: first frame is no keyframe\n");
  1505. else if (s->picture_structure != PICT_FRAME)
  1506. av_log(avctx, AV_LOG_DEBUG,
  1507. "allocate dummy last picture for field based first keyframe\n");
  1508. /* Allocate a dummy frame */
  1509. i = ff_find_unused_picture(s, 0);
  1510. if (i < 0) {
  1511. av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
  1512. return i;
  1513. }
  1514. s->last_picture_ptr = &s->picture[i];
  1515. s->last_picture_ptr->reference = 3;
  1516. s->last_picture_ptr->f.key_frame = 0;
  1517. s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P;
  1518. if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
  1519. s->last_picture_ptr = NULL;
  1520. return -1;
  1521. }
  1522. if (!avctx->hwaccel) {
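/* Initialize the dummy frame with mid-grey (0x80) samples rather than
 * leaving uninitialized data in it; FLV1/H263 get black (16) luma instead. */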
  1523. for(i=0; i<avctx->height; i++)
  1524. memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i,
  1525. 0x80, avctx->width);
  1526. for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
  1527. memset(s->last_picture_ptr->f.data[1] + s->last_picture_ptr->f.linesize[1]*i,
  1528. 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
  1529. memset(s->last_picture_ptr->f.data[2] + s->last_picture_ptr->f.linesize[2]*i,
  1530. 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
  1531. }
  1532. if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
  1533. for(i=0; i<avctx->height; i++)
  1534. memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
  1535. }
  1536. }
  1537. ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
  1538. ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
  1539. }
  1540. if ((s->next_picture_ptr == NULL ||
  1541. s->next_picture_ptr->f.buf[0] == NULL) &&
  1542. s->pict_type == AV_PICTURE_TYPE_B) {
  1543. /* Allocate a dummy frame */
  1544. i = ff_find_unused_picture(s, 0);
  1545. if (i < 0) {
  1546. av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
  1547. return i;
  1548. }
  1549. s->next_picture_ptr = &s->picture[i];
  1550. s->next_picture_ptr->reference = 3;
  1551. s->next_picture_ptr->f.key_frame = 0;
  1552. s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P;
  1553. if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
  1554. s->next_picture_ptr = NULL;
  1555. return -1;
  1556. }
  1557. ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
  1558. ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
  1559. }
  1560. #if 0 // BUFREF-FIXME
  1561. memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
  1562. memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
  1563. #endif
  1564. if (s->last_picture_ptr) {
  1565. ff_mpeg_unref_picture(s, &s->last_picture);
  1566. if (s->last_picture_ptr->f.buf[0] &&
  1567. (ret = ff_mpeg_ref_picture(s, &s->last_picture,
  1568. s->last_picture_ptr)) < 0)
  1569. return ret;
  1570. }
  1571. if (s->next_picture_ptr) {
  1572. ff_mpeg_unref_picture(s, &s->next_picture);
  1573. if (s->next_picture_ptr->f.buf[0] &&
  1574. (ret = ff_mpeg_ref_picture(s, &s->next_picture,
  1575. s->next_picture_ptr)) < 0)
  1576. return ret;
  1577. }
  1578. av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
  1579. s->last_picture_ptr->f.buf[0]));
  1580. if (s->picture_structure!= PICT_FRAME) {
  1581. int i;
  1582. for (i = 0; i < 4; i++) {
  1583. if (s->picture_structure == PICT_BOTTOM_FIELD) {
  1584. s->current_picture.f.data[i] +=
  1585. s->current_picture.f.linesize[i];
  1586. }
  1587. s->current_picture.f.linesize[i] *= 2;
  1588. s->last_picture.f.linesize[i] *= 2;
  1589. s->next_picture.f.linesize[i] *= 2;
  1590. }
  1591. }
  1592. s->err_recognition = avctx->err_recognition;
/* Set the dequantizer; we cannot do this during init, as it may change
 * for MPEG-4, and we cannot do it in the header decode, as init is not
 * called for MPEG-4 at that point. */
  1596. if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
  1597. s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
  1598. s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
  1599. } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
  1600. s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
  1601. s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
  1602. } else {
  1603. s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
  1604. s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
  1605. }
  1606. return 0;
  1607. }
  1608. /* called after a frame has been decoded. */
  1609. void ff_MPV_frame_end(MpegEncContext *s)
  1610. {
  1611. emms_c();
  1612. if (s->current_picture.reference)
  1613. ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
  1614. }
/**
 * Draw a line from (sx, sy) to (ex, ey) into buf.
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the line
 */
  1622. static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
  1623. int w, int h, int stride, int color)
  1624. {
  1625. int x, y, fr, f;
  1626. sx = av_clip(sx, 0, w - 1);
  1627. sy = av_clip(sy, 0, h - 1);
  1628. ex = av_clip(ex, 0, w - 1);
  1629. ey = av_clip(ey, 0, h - 1);
  1630. buf[sy * stride + sx] += color;
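/* 16.16 fixed-point DDA along the major axis; the fractional part splits
 * the color between the two nearest pixels for cheap anti-aliasing. */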
  1631. if (FFABS(ex - sx) > FFABS(ey - sy)) {
  1632. if (sx > ex) {
  1633. FFSWAP(int, sx, ex);
  1634. FFSWAP(int, sy, ey);
  1635. }
  1636. buf += sx + sy * stride;
  1637. ex -= sx;
  1638. f = ((ey - sy) << 16) / ex;
  1639. for (x = 0; x <= ex; x++) {
  1640. y = (x * f) >> 16;
  1641. fr = (x * f) & 0xFFFF;
  1642. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1643. if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
  1644. }
  1645. } else {
  1646. if (sy > ey) {
  1647. FFSWAP(int, sx, ex);
  1648. FFSWAP(int, sy, ey);
  1649. }
  1650. buf += sx + sy * stride;
  1651. ey -= sy;
  1652. if (ey)
  1653. f = ((ex - sx) << 16) / ey;
  1654. else
  1655. f = 0;
  1656. for(y= 0; y <= ey; y++){
  1657. x = (y*f) >> 16;
  1658. fr = (y*f) & 0xFFFF;
  1659. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1660. if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
  1661. }
  1662. }
  1663. }
/**
 * Draw an arrow from (ex, ey) to (sx, sy); the arrowhead is drawn at (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
  1671. static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
  1672. int ey, int w, int h, int stride, int color)
  1673. {
  1674. int dx,dy;
  1675. sx = av_clip(sx, -100, w + 100);
  1676. sy = av_clip(sy, -100, h + 100);
  1677. ex = av_clip(ex, -100, w + 100);
  1678. ey = av_clip(ey, -100, h + 100);
  1679. dx = ex - sx;
  1680. dy = ey - sy;
  1681. if (dx * dx + dy * dy > 3 * 3) {
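/* Arrowhead: the direction vector rotated by +/-45 degrees and scaled to
 * a fixed length; only drawn if the arrow is long enough. */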
  1682. int rx = dx + dy;
  1683. int ry = -dx + dy;
  1684. int length = ff_sqrt((rx * rx + ry * ry) << 8);
  1685. // FIXME subpixel accuracy
  1686. rx = ROUNDED_DIV(rx * 3 << 4, length);
  1687. ry = ROUNDED_DIV(ry * 3 << 4, length);
  1688. draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
  1689. draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
  1690. }
  1691. draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
  1692. }
  1693. /**
  1694. * Print debugging info for the given picture.
  1695. */
  1696. void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
  1697. int *low_delay,
  1698. int mb_width, int mb_height, int mb_stride, int quarter_sample)
  1699. {
  1700. if (avctx->hwaccel || !p || !p->mb_type
  1701. || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
  1702. return;
  1703. if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
  1704. int x,y;
  1705. av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
  1706. av_get_picture_type_char(pict->pict_type));
  1707. for (y = 0; y < mb_height; y++) {
  1708. for (x = 0; x < mb_width; x++) {
  1709. if (avctx->debug & FF_DEBUG_SKIP) {
  1710. int count = mbskip_table[x + y * mb_stride];
  1711. if (count > 9)
  1712. count = 9;
  1713. av_log(avctx, AV_LOG_DEBUG, "%1d", count);
  1714. }
  1715. if (avctx->debug & FF_DEBUG_QP) {
  1716. av_log(avctx, AV_LOG_DEBUG, "%2d",
  1717. p->qscale_table[x + y * mb_stride]);
  1718. }
  1719. if (avctx->debug & FF_DEBUG_MB_TYPE) {
  1720. int mb_type = p->mb_type[x + y * mb_stride];
  1721. // Type & MV direction
  1722. if (IS_PCM(mb_type))
  1723. av_log(avctx, AV_LOG_DEBUG, "P");
  1724. else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
  1725. av_log(avctx, AV_LOG_DEBUG, "A");
  1726. else if (IS_INTRA4x4(mb_type))
  1727. av_log(avctx, AV_LOG_DEBUG, "i");
  1728. else if (IS_INTRA16x16(mb_type))
  1729. av_log(avctx, AV_LOG_DEBUG, "I");
  1730. else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
  1731. av_log(avctx, AV_LOG_DEBUG, "d");
  1732. else if (IS_DIRECT(mb_type))
  1733. av_log(avctx, AV_LOG_DEBUG, "D");
  1734. else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
  1735. av_log(avctx, AV_LOG_DEBUG, "g");
  1736. else if (IS_GMC(mb_type))
  1737. av_log(avctx, AV_LOG_DEBUG, "G");
  1738. else if (IS_SKIP(mb_type))
  1739. av_log(avctx, AV_LOG_DEBUG, "S");
  1740. else if (!USES_LIST(mb_type, 1))
  1741. av_log(avctx, AV_LOG_DEBUG, ">");
  1742. else if (!USES_LIST(mb_type, 0))
  1743. av_log(avctx, AV_LOG_DEBUG, "<");
  1744. else {
  1745. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1746. av_log(avctx, AV_LOG_DEBUG, "X");
  1747. }
  1748. // segmentation
  1749. if (IS_8X8(mb_type))
  1750. av_log(avctx, AV_LOG_DEBUG, "+");
  1751. else if (IS_16X8(mb_type))
  1752. av_log(avctx, AV_LOG_DEBUG, "-");
  1753. else if (IS_8X16(mb_type))
  1754. av_log(avctx, AV_LOG_DEBUG, "|");
  1755. else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
  1756. av_log(avctx, AV_LOG_DEBUG, " ");
  1757. else
  1758. av_log(avctx, AV_LOG_DEBUG, "?");
  1759. if (IS_INTERLACED(mb_type))
  1760. av_log(avctx, AV_LOG_DEBUG, "=");
  1761. else
  1762. av_log(avctx, AV_LOG_DEBUG, " ");
  1763. }
  1764. }
  1765. av_log(avctx, AV_LOG_DEBUG, "\n");
  1766. }
  1767. }
  1768. if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
  1769. (avctx->debug_mv)) {
  1770. const int shift = 1 + quarter_sample;
  1771. int mb_y;
  1772. uint8_t *ptr;
  1773. int i;
  1774. int h_chroma_shift, v_chroma_shift, block_height;
  1775. const int width = avctx->width;
  1776. const int height = avctx->height;
  1777. const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
  1778. const int mv_stride = (mb_width << mv_sample_log2) +
  1779. (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
  1780. *low_delay = 0; // needed to see the vectors without trashing the buffers
  1781. avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
  1782. av_frame_make_writable(pict);
  1783. pict->opaque = NULL;
  1784. ptr = pict->data[0];
  1785. block_height = 16 >> v_chroma_shift;
  1786. for (mb_y = 0; mb_y < mb_height; mb_y++) {
  1787. int mb_x;
  1788. for (mb_x = 0; mb_x < mb_width; mb_x++) {
  1789. const int mb_index = mb_x + mb_y * mb_stride;
  1790. if ((avctx->debug_mv) && p->motion_val[0]) {
  1791. int type;
  1792. for (type = 0; type < 3; type++) {
  1793. int direction = 0;
  1794. switch (type) {
  1795. case 0:
  1796. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
  1797. (pict->pict_type!= AV_PICTURE_TYPE_P))
  1798. continue;
  1799. direction = 0;
  1800. break;
  1801. case 1:
  1802. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
  1803. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1804. continue;
  1805. direction = 0;
  1806. break;
  1807. case 2:
  1808. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
  1809. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1810. continue;
  1811. direction = 1;
  1812. break;
  1813. }
  1814. if (!USES_LIST(p->mb_type[mb_index], direction))
  1815. continue;
  1816. if (IS_8X8(p->mb_type[mb_index])) {
  1817. int i;
  1818. for (i = 0; i < 4; i++) {
  1819. int sx = mb_x * 16 + 4 + 8 * (i & 1);
  1820. int sy = mb_y * 16 + 4 + 8 * (i >> 1);
  1821. int xy = (mb_x * 2 + (i & 1) +
  1822. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1823. int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
  1824. int my = (p->motion_val[direction][xy][1] >> shift) + sy;
  1825. draw_arrow(ptr, sx, sy, mx, my, width,
  1826. height, pict->linesize[0], 100);
  1827. }
  1828. } else if (IS_16X8(p->mb_type[mb_index])) {
  1829. int i;
  1830. for (i = 0; i < 2; i++) {
  1831. int sx = mb_x * 16 + 8;
  1832. int sy = mb_y * 16 + 4 + 8 * i;
  1833. int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
  1834. int mx = (p->motion_val[direction][xy][0] >> shift);
  1835. int my = (p->motion_val[direction][xy][1] >> shift);
  1836. if (IS_INTERLACED(p->mb_type[mb_index]))
  1837. my *= 2;
  1838. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1839. height, pict->linesize[0], 100);
  1840. }
  1841. } else if (IS_8X16(p->mb_type[mb_index])) {
  1842. int i;
  1843. for (i = 0; i < 2; i++) {
  1844. int sx = mb_x * 16 + 4 + 8 * i;
  1845. int sy = mb_y * 16 + 8;
  1846. int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
  1847. int mx = p->motion_val[direction][xy][0] >> shift;
  1848. int my = p->motion_val[direction][xy][1] >> shift;
  1849. if (IS_INTERLACED(p->mb_type[mb_index]))
  1850. my *= 2;
  1851. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1852. height, pict->linesize[0], 100);
  1853. }
  1854. } else {
  1855. int sx= mb_x * 16 + 8;
  1856. int sy= mb_y * 16 + 8;
  1857. int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
  1858. int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
  1859. int my= (p->motion_val[direction][xy][1]>>shift) + sy;
  1860. draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
  1861. }
  1862. }
  1863. }
  1864. if ((avctx->debug & FF_DEBUG_VIS_QP)) {
  1865. uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
  1866. 0x0101010101010101ULL;
  1867. int y;
  1868. for (y = 0; y < block_height; y++) {
  1869. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1870. (block_height * mb_y + y) *
  1871. pict->linesize[1]) = c;
  1872. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1873. (block_height * mb_y + y) *
  1874. pict->linesize[2]) = c;
  1875. }
  1876. }
  1877. if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
  1878. p->motion_val[0]) {
  1879. int mb_type = p->mb_type[mb_index];
  1880. uint64_t u,v;
  1881. int y;
  1882. #define COLOR(theta, r) \
  1883. u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
  1884. v = (int)(128 + r * sin(theta * 3.141592 / 180));
  1885. u = v = 128;
  1886. if (IS_PCM(mb_type)) {
  1887. COLOR(120, 48)
  1888. } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
  1889. IS_INTRA16x16(mb_type)) {
  1890. COLOR(30, 48)
  1891. } else if (IS_INTRA4x4(mb_type)) {
  1892. COLOR(90, 48)
  1893. } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
  1894. // COLOR(120, 48)
  1895. } else if (IS_DIRECT(mb_type)) {
  1896. COLOR(150, 48)
  1897. } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
  1898. COLOR(170, 48)
  1899. } else if (IS_GMC(mb_type)) {
  1900. COLOR(190, 48)
  1901. } else if (IS_SKIP(mb_type)) {
  1902. // COLOR(180, 48)
  1903. } else if (!USES_LIST(mb_type, 1)) {
  1904. COLOR(240, 48)
  1905. } else if (!USES_LIST(mb_type, 0)) {
  1906. COLOR(0, 48)
  1907. } else {
  1908. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1909. COLOR(300,48)
  1910. }
  1911. u *= 0x0101010101010101ULL;
  1912. v *= 0x0101010101010101ULL;
  1913. for (y = 0; y < block_height; y++) {
  1914. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1915. (block_height * mb_y + y) * pict->linesize[1]) = u;
  1916. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1917. (block_height * mb_y + y) * pict->linesize[2]) = v;
  1918. }
  1919. // segmentation
  1920. if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
  1921. *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
  1922. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1923. *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
  1924. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  1925. }
  1926. if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
  1927. for (y = 0; y < 16; y++)
  1928. pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
  1929. pict->linesize[0]] ^= 0x80;
  1930. }
  1931. if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
  1932. int dm = 1 << (mv_sample_log2 - 2);
  1933. for (i = 0; i < 4; i++) {
  1934. int sx = mb_x * 16 + 8 * (i & 1);
  1935. int sy = mb_y * 16 + 8 * (i >> 1);
  1936. int xy = (mb_x * 2 + (i & 1) +
  1937. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1938. // FIXME bidir
  1939. int32_t *mv = (int32_t *) &p->motion_val[0][xy];
  1940. if (mv[0] != mv[dm] ||
  1941. mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
  1942. for (y = 0; y < 8; y++)
  1943. pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
  1944. if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
  1945. *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
  1946. pict->linesize[0]) ^= 0x8080808080808080ULL;
  1947. }
  1948. }
  1949. if (IS_INTERLACED(mb_type) &&
  1950. avctx->codec->id == AV_CODEC_ID_H264) {
  1951. // hmm
  1952. }
  1953. }
  1954. mbskip_table[mb_index] = 0;
  1955. }
  1956. }
  1957. }
  1958. }
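/* Thin wrapper around ff_print_debug_info2() that passes the tables and
 * geometry from the MpegEncContext. */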
  1959. void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
  1960. {
  1961. ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
  1962. s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
  1963. }
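/**
 * Export the per-macroblock qscale table of a picture as side data of
 * the given frame. The offset of 2 * mb_stride + 1 skips the table's
 * edge padding, so the exported data starts at the first real macroblock.
 */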
  1964. int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
  1965. {
  1966. AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
  1967. int offset = 2*s->mb_stride + 1;
  1968. if(!ref)
  1969. return AVERROR(ENOMEM);
  1970. av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
  1971. ref->size -= offset;
  1972. ref->data += offset;
  1973. return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
  1974. }
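/**
 * Half-pel motion compensation for one block in lowres mode, with edge
 * emulation when the source area crosses the picture border.
 * @return 1 if the edge emulation buffer was used, 0 otherwise
 */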
  1975. static inline int hpel_motion_lowres(MpegEncContext *s,
  1976. uint8_t *dest, uint8_t *src,
  1977. int field_based, int field_select,
  1978. int src_x, int src_y,
  1979. int width, int height, ptrdiff_t stride,
  1980. int h_edge_pos, int v_edge_pos,
  1981. int w, int h, h264_chroma_mc_func *pix_op,
  1982. int motion_x, int motion_y)
  1983. {
  1984. const int lowres = s->avctx->lowres;
  1985. const int op_index = FFMIN(lowres, 3);
  1986. const int s_mask = (2 << lowres) - 1;
  1987. int emu = 0;
  1988. int sx, sy;
  1989. if (s->quarter_sample) {
  1990. motion_x /= 2;
  1991. motion_y /= 2;
  1992. }
  1993. sx = motion_x & s_mask;
  1994. sy = motion_y & s_mask;
  1995. src_x += motion_x >> lowres + 1;
  1996. src_y += motion_y >> lowres + 1;
  1997. src += src_y * stride + src_x;
  1998. if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
  1999. (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  2000. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
  2001. s->linesize, s->linesize,
  2002. w + 1, (h + 1) << field_based,
  2003. src_x, src_y << field_based,
  2004. h_edge_pos, v_edge_pos);
  2005. src = s->edge_emu_buffer;
  2006. emu = 1;
  2007. }
  2008. sx = (sx << 2) >> lowres;
  2009. sy = (sy << 2) >> lowres;
  2010. if (field_select)
  2011. src += s->linesize;
  2012. pix_op[op_index](dest, src, stride, h, sx, sy);
  2013. return emu;
  2014. }
/* Apply one MPEG motion vector to the three (Y, Cb, Cr) components. */
  2016. static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
  2017. uint8_t *dest_y,
  2018. uint8_t *dest_cb,
  2019. uint8_t *dest_cr,
  2020. int field_based,
  2021. int bottom_field,
  2022. int field_select,
  2023. uint8_t **ref_picture,
  2024. h264_chroma_mc_func *pix_op,
  2025. int motion_x, int motion_y,
  2026. int h, int mb_y)
  2027. {
  2028. uint8_t *ptr_y, *ptr_cb, *ptr_cr;
  2029. int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
  2030. ptrdiff_t uvlinesize, linesize;
  2031. const int lowres = s->avctx->lowres;
  2032. const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
  2033. const int block_s = 8>>lowres;
  2034. const int s_mask = (2 << lowres) - 1;
  2035. const int h_edge_pos = s->h_edge_pos >> lowres;
  2036. const int v_edge_pos = s->v_edge_pos >> lowres;
  2037. linesize = s->current_picture.f.linesize[0] << field_based;
  2038. uvlinesize = s->current_picture.f.linesize[1] << field_based;
  2039. // FIXME obviously not perfect but qpel will not work in lowres anyway
  2040. if (s->quarter_sample) {
  2041. motion_x /= 2;
  2042. motion_y /= 2;
  2043. }
  2044. if(field_based){
  2045. motion_y += (bottom_field - field_select)*((1 << lowres)-1);
  2046. }
  2047. sx = motion_x & s_mask;
  2048. sy = motion_y & s_mask;
  2049. src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
  2050. src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
  2051. if (s->out_format == FMT_H263) {
  2052. uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
  2053. uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
  2054. uvsrc_x = src_x >> 1;
  2055. uvsrc_y = src_y >> 1;
  2056. } else if (s->out_format == FMT_H261) {
// chroma MVs are always full-pel in H.261
  2058. mx = motion_x / 4;
  2059. my = motion_y / 4;
  2060. uvsx = (2 * mx) & s_mask;
  2061. uvsy = (2 * my) & s_mask;
  2062. uvsrc_x = s->mb_x * block_s + (mx >> lowres);
  2063. uvsrc_y = mb_y * block_s + (my >> lowres);
  2064. } else {
  2065. if(s->chroma_y_shift){
  2066. mx = motion_x / 2;
  2067. my = motion_y / 2;
  2068. uvsx = mx & s_mask;
  2069. uvsy = my & s_mask;
  2070. uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
  2071. uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
  2072. } else {
  2073. if(s->chroma_x_shift){
  2074. //Chroma422
  2075. mx = motion_x / 2;
  2076. uvsx = mx & s_mask;
  2077. uvsy = motion_y & s_mask;
  2078. uvsrc_y = src_y;
  2079. uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
  2080. } else {
  2081. //Chroma444
  2082. uvsx = motion_x & s_mask;
  2083. uvsy = motion_y & s_mask;
  2084. uvsrc_x = src_x;
  2085. uvsrc_y = src_y;
  2086. }
  2087. }
  2088. }
  2089. ptr_y = ref_picture[0] + src_y * linesize + src_x;
  2090. ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
  2091. ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
  2092. if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
  2093. (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  2094. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
  2095. linesize >> field_based, linesize >> field_based,
  2096. 17, 17 + field_based,
  2097. src_x, src_y << field_based, h_edge_pos,
  2098. v_edge_pos);
  2099. ptr_y = s->edge_emu_buffer;
  2100. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  2101. uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
  2102. s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
  2103. uvlinesize >> field_based, uvlinesize >> field_based,
  2104. 9, 9 + field_based,
  2105. uvsrc_x, uvsrc_y << field_based,
  2106. h_edge_pos >> 1, v_edge_pos >> 1);
  2107. s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
  2108. uvlinesize >> field_based,uvlinesize >> field_based,
  2109. 9, 9 + field_based,
  2110. uvsrc_x, uvsrc_y << field_based,
  2111. h_edge_pos >> 1, v_edge_pos >> 1);
  2112. ptr_cb = uvbuf;
  2113. ptr_cr = uvbuf + 16;
  2114. }
  2115. }
  2116. // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
  2117. if (bottom_field) {
  2118. dest_y += s->linesize;
  2119. dest_cb += s->uvlinesize;
  2120. dest_cr += s->uvlinesize;
  2121. }
  2122. if (field_select) {
  2123. ptr_y += s->linesize;
  2124. ptr_cb += s->uvlinesize;
  2125. ptr_cr += s->uvlinesize;
  2126. }
  2127. sx = (sx << 2) >> lowres;
  2128. sy = (sy << 2) >> lowres;
  2129. pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
  2130. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  2131. int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
  2132. uvsx = (uvsx << 2) >> lowres;
  2133. uvsy = (uvsy << 2) >> lowres;
  2134. if (hc) {
  2135. pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
  2136. pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
  2137. }
  2138. }
  2139. // FIXME h261 lowres loop filter
  2140. }
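/**
 * Chroma motion compensation for an 8x8 (4MV) macroblock in lowres mode.
 * mx/my are the sums of the four luma vectors; they are reduced to a
 * single chroma vector with the special H.263 rounding.
 */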
  2141. static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
  2142. uint8_t *dest_cb, uint8_t *dest_cr,
  2143. uint8_t **ref_picture,
  2144. h264_chroma_mc_func * pix_op,
  2145. int mx, int my)
  2146. {
  2147. const int lowres = s->avctx->lowres;
  2148. const int op_index = FFMIN(lowres, 3);
  2149. const int block_s = 8 >> lowres;
  2150. const int s_mask = (2 << lowres) - 1;
  2151. const int h_edge_pos = s->h_edge_pos >> lowres + 1;
  2152. const int v_edge_pos = s->v_edge_pos >> lowres + 1;
  2153. int emu = 0, src_x, src_y, sx, sy;
  2154. ptrdiff_t offset;
  2155. uint8_t *ptr;
  2156. if (s->quarter_sample) {
  2157. mx /= 2;
  2158. my /= 2;
  2159. }
/* In the 8x8 case, we construct a single chroma motion vector
   with a special rounding */
  2162. mx = ff_h263_round_chroma(mx);
  2163. my = ff_h263_round_chroma(my);
  2164. sx = mx & s_mask;
  2165. sy = my & s_mask;
  2166. src_x = s->mb_x * block_s + (mx >> lowres + 1);
  2167. src_y = s->mb_y * block_s + (my >> lowres + 1);
  2168. offset = src_y * s->uvlinesize + src_x;
  2169. ptr = ref_picture[1] + offset;
  2170. if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
  2171. (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
  2172. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
  2173. s->uvlinesize, s->uvlinesize,
  2174. 9, 9,
  2175. src_x, src_y, h_edge_pos, v_edge_pos);
  2176. ptr = s->edge_emu_buffer;
  2177. emu = 1;
  2178. }
  2179. sx = (sx << 2) >> lowres;
  2180. sy = (sy << 2) >> lowres;
  2181. pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
  2182. ptr = ref_picture[2] + offset;
  2183. if (emu) {
  2184. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
  2185. s->uvlinesize, s->uvlinesize,
  2186. 9, 9,
  2187. src_x, src_y, h_edge_pos, v_edge_pos);
  2188. ptr = s->edge_emu_buffer;
  2189. }
  2190. pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
  2191. }
  2192. /**
* Motion compensation of a single macroblock (lowres version).
  2194. * @param s context
  2195. * @param dest_y luma destination pointer
  2196. * @param dest_cb chroma cb/u destination pointer
  2197. * @param dest_cr chroma cr/v destination pointer
  2198. * @param dir direction (0->forward, 1->backward)
  2199. * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
  2200. * @param pix_op halfpel motion compensation function (average or put normally)
  2201. * the motion vectors are taken from s->mv and the MV type from s->mv_type
  2202. */
  2203. static inline void MPV_motion_lowres(MpegEncContext *s,
  2204. uint8_t *dest_y, uint8_t *dest_cb,
  2205. uint8_t *dest_cr,
  2206. int dir, uint8_t **ref_picture,
  2207. h264_chroma_mc_func *pix_op)
  2208. {
  2209. int mx, my;
  2210. int mb_x, mb_y, i;
  2211. const int lowres = s->avctx->lowres;
  2212. const int block_s = 8 >>lowres;
  2213. mb_x = s->mb_x;
  2214. mb_y = s->mb_y;
  2215. switch (s->mv_type) {
  2216. case MV_TYPE_16X16:
  2217. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2218. 0, 0, 0,
  2219. ref_picture, pix_op,
  2220. s->mv[dir][0][0], s->mv[dir][0][1],
  2221. 2 * block_s, mb_y);
  2222. break;
  2223. case MV_TYPE_8X8:
  2224. mx = 0;
  2225. my = 0;
  2226. for (i = 0; i < 4; i++) {
  2227. hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
  2228. s->linesize) * block_s,
  2229. ref_picture[0], 0, 0,
  2230. (2 * mb_x + (i & 1)) * block_s,
  2231. (2 * mb_y + (i >> 1)) * block_s,
  2232. s->width, s->height, s->linesize,
  2233. s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
  2234. block_s, block_s, pix_op,
  2235. s->mv[dir][i][0], s->mv[dir][i][1]);
  2236. mx += s->mv[dir][i][0];
  2237. my += s->mv[dir][i][1];
  2238. }
  2239. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
  2240. chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
  2241. pix_op, mx, my);
  2242. break;
  2243. case MV_TYPE_FIELD:
  2244. if (s->picture_structure == PICT_FRAME) {
  2245. /* top field */
  2246. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2247. 1, 0, s->field_select[dir][0],
  2248. ref_picture, pix_op,
  2249. s->mv[dir][0][0], s->mv[dir][0][1],
  2250. block_s, mb_y);
  2251. /* bottom field */
  2252. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2253. 1, 1, s->field_select[dir][1],
  2254. ref_picture, pix_op,
  2255. s->mv[dir][1][0], s->mv[dir][1][1],
  2256. block_s, mb_y);
  2257. } else {
  2258. if (s->picture_structure != s->field_select[dir][0] + 1 &&
  2259. s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
  2260. ref_picture = s->current_picture_ptr->f.data;
  2261. }
  2262. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2263. 0, 0, s->field_select[dir][0],
  2264. ref_picture, pix_op,
  2265. s->mv[dir][0][0],
  2266. s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
  2267. }
  2268. break;
  2269. case MV_TYPE_16X8:
  2270. for (i = 0; i < 2; i++) {
  2271. uint8_t **ref2picture;
  2272. if (s->picture_structure == s->field_select[dir][i] + 1 ||
  2273. s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
  2274. ref2picture = ref_picture;
  2275. } else {
  2276. ref2picture = s->current_picture_ptr->f.data;
  2277. }
  2278. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2279. 0, 0, s->field_select[dir][i],
  2280. ref2picture, pix_op,
  2281. s->mv[dir][i][0], s->mv[dir][i][1] +
  2282. 2 * block_s * i, block_s, mb_y >> 1);
  2283. dest_y += 2 * block_s * s->linesize;
  2284. dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  2285. dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  2286. }
  2287. break;
  2288. case MV_TYPE_DMV:
  2289. if (s->picture_structure == PICT_FRAME) {
  2290. for (i = 0; i < 2; i++) {
  2291. int j;
  2292. for (j = 0; j < 2; j++) {
  2293. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2294. 1, j, j ^ i,
  2295. ref_picture, pix_op,
  2296. s->mv[dir][2 * i + j][0],
  2297. s->mv[dir][2 * i + j][1],
  2298. block_s, mb_y);
  2299. }
  2300. pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
  2301. }
  2302. } else {
  2303. for (i = 0; i < 2; i++) {
  2304. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2305. 0, 0, s->picture_structure != i + 1,
  2306. ref_picture, pix_op,
  2307. s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
  2308. 2 * block_s, mb_y >> 1);
// after the put, average into the same block
  2310. pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
// opposite parity is always in the same
// frame if this is the second field
  2313. if (!s->first_field) {
  2314. ref_picture = s->current_picture_ptr->f.data;
  2315. }
  2316. }
  2317. }
  2318. break;
  2319. default:
  2320. av_assert2(0);
  2321. }
  2322. }
/**
 * Find the lowest MB row referenced in the MVs, so that frame-threaded
 * decoding only needs to wait for that row of the reference frame.
 */
  2326. int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
  2327. {
  2328. int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
  2329. int my, off, i, mvs;
  2330. if (s->picture_structure != PICT_FRAME || s->mcsel)
  2331. goto unhandled;
  2332. switch (s->mv_type) {
  2333. case MV_TYPE_16X16:
  2334. mvs = 1;
  2335. break;
  2336. case MV_TYPE_16X8:
  2337. mvs = 2;
  2338. break;
  2339. case MV_TYPE_8X8:
  2340. mvs = 4;
  2341. break;
  2342. default:
  2343. goto unhandled;
  2344. }
  2345. for (i = 0; i < mvs; i++) {
  2346. my = s->mv[dir][i][1]<<qpel_shift;
  2347. my_max = FFMAX(my_max, my);
  2348. my_min = FFMIN(my_min, my);
  2349. }
  2350. off = (FFMAX(-my_min, my_max) + 63) >> 6;
  2351. return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
  2352. unhandled:
  2353. return s->mb_height-1;
  2354. }
/* dequantize block[] and put its IDCT to dest[] */
  2356. static inline void put_dct(MpegEncContext *s,
  2357. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  2358. {
  2359. s->dct_unquantize_intra(s, block, i, qscale);
  2360. s->dsp.idct_put (dest, line_size, block);
  2361. }
/* add the IDCT of block[] to dest[]; empty blocks are skipped */
  2363. static inline void add_dct(MpegEncContext *s,
  2364. int16_t *block, int i, uint8_t *dest, int line_size)
  2365. {
  2366. if (s->block_last_index[i] >= 0) {
  2367. s->dsp.idct_add (dest, line_size, block);
  2368. }
  2369. }
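/* dequantize block[] and add its IDCT to dest[]; empty blocks are skipped */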
  2370. static inline void add_dequant_dct(MpegEncContext *s,
  2371. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  2372. {
  2373. if (s->block_last_index[i] >= 0) {
  2374. s->dct_unquantize_inter(s, block, i, qscale);
  2375. s->dsp.idct_add (dest, line_size, block);
  2376. }
  2377. }
  2378. /**
  2379. * Clean dc, ac, coded_block for the current non-intra MB.
  2380. */
  2381. void ff_clean_intra_table_entries(MpegEncContext *s)
  2382. {
  2383. int wrap = s->b8_stride;
  2384. int xy = s->block_index[0];
  2385. s->dc_val[0][xy ] =
  2386. s->dc_val[0][xy + 1 ] =
  2387. s->dc_val[0][xy + wrap] =
  2388. s->dc_val[0][xy + 1 + wrap] = 1024;
  2389. /* ac pred */
  2390. memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
  2391. memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
  2392. if (s->msmpeg4_version>=3) {
  2393. s->coded_block[xy ] =
  2394. s->coded_block[xy + 1 ] =
  2395. s->coded_block[xy + wrap] =
  2396. s->coded_block[xy + 1 + wrap] = 0;
  2397. }
  2398. /* chroma */
  2399. wrap = s->mb_stride;
  2400. xy = s->mb_x + s->mb_y * wrap;
  2401. s->dc_val[1][xy] =
  2402. s->dc_val[2][xy] = 1024;
  2403. /* ac pred */
  2404. memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
  2405. memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
  2406. s->mbintra_table[xy]= 0;
  2407. }
/* Generic function called after a macroblock has been parsed by the
   decoder or encoded by the encoder.
   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced DCT is used (MPEG-2)
 */
  2417. static av_always_inline
  2418. void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
  2419. int lowres_flag, int is_mpeg12)
  2420. {
  2421. const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
  2422. if (CONFIG_XVMC &&
  2423. s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
  2424. s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
  2425. return;
  2426. }
  2427. if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  2428. /* print DCT coefficients */
  2429. int i,j;
  2430. av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
  2431. for(i=0; i<6; i++){
  2432. for(j=0; j<64; j++){
  2433. av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
  2434. }
  2435. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  2436. }
  2437. }
  2438. s->current_picture.qscale_table[mb_xy] = s->qscale;
  2439. /* update DC predictors for P macroblocks */
  2440. if (!s->mb_intra) {
  2441. if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
  2442. if(s->mbintra_table[mb_xy])
  2443. ff_clean_intra_table_entries(s);
  2444. } else {
  2445. s->last_dc[0] =
  2446. s->last_dc[1] =
  2447. s->last_dc[2] = 128 << s->intra_dc_precision;
  2448. }
  2449. }
  2450. else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
  2451. s->mbintra_table[mb_xy]=1;
  2452. if ( (s->flags&CODEC_FLAG_PSNR)
  2453. || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
  2454. || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
  2455. uint8_t *dest_y, *dest_cb, *dest_cr;
  2456. int dct_linesize, dct_offset;
  2457. op_pixels_func (*op_pix)[4];
  2458. qpel_mc_func (*op_qpix)[16];
  2459. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  2460. const int uvlinesize = s->current_picture.f.linesize[1];
  2461. const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
  2462. const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
/* avoid copy if the macroblock was skipped in the last frame too */
/* track skips only while decoding, as during encoding we may trash the buffers a bit */
  2465. if(!s->encoding){
  2466. uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
  2467. if (s->mb_skipped) {
  2468. s->mb_skipped= 0;
  2469. av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
  2470. *mbskip_ptr = 1;
  2471. } else if(!s->current_picture.reference) {
  2472. *mbskip_ptr = 1;
  2473. } else{
  2474. *mbskip_ptr = 0; /* not skipped */
  2475. }
  2476. }
  2477. dct_linesize = linesize << s->interlaced_dct;
  2478. dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
  2479. if(readable){
  2480. dest_y= s->dest[0];
  2481. dest_cb= s->dest[1];
  2482. dest_cr= s->dest[2];
  2483. }else{
  2484. dest_y = s->b_scratchpad;
  2485. dest_cb= s->b_scratchpad+16*linesize;
  2486. dest_cr= s->b_scratchpad+32*linesize;
  2487. }
  2488. if (!s->mb_intra) {
  2489. /* motion handling */
  2490. /* decoding or more than one mb_type (MC was already done otherwise) */
  2491. if(!s->encoding){
  2492. if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
  2493. if (s->mv_dir & MV_DIR_FORWARD) {
  2494. ff_thread_await_progress(&s->last_picture_ptr->tf,
  2495. ff_MPV_lowest_referenced_row(s, 0),
  2496. 0);
  2497. }
  2498. if (s->mv_dir & MV_DIR_BACKWARD) {
  2499. ff_thread_await_progress(&s->next_picture_ptr->tf,
  2500. ff_MPV_lowest_referenced_row(s, 1),
  2501. 0);
  2502. }
  2503. }
  2504. if(lowres_flag){
  2505. h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
  2506. if (s->mv_dir & MV_DIR_FORWARD) {
  2507. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
  2508. op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
  2509. }
  2510. if (s->mv_dir & MV_DIR_BACKWARD) {
  2511. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
  2512. }
  2513. }else{
  2514. op_qpix = s->me.qpel_put;
  2515. if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
  2516. op_pix = s->hdsp.put_pixels_tab;
  2517. }else{
  2518. op_pix = s->hdsp.put_no_rnd_pixels_tab;
  2519. }
  2520. if (s->mv_dir & MV_DIR_FORWARD) {
  2521. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
  2522. op_pix = s->hdsp.avg_pixels_tab;
  2523. op_qpix= s->me.qpel_avg;
  2524. }
  2525. if (s->mv_dir & MV_DIR_BACKWARD) {
  2526. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
  2527. }
  2528. }
  2529. }
  2530. /* skip dequant / idct if we are really late ;) */
  2531. if(s->avctx->skip_idct){
  2532. if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
  2533. ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
  2534. || s->avctx->skip_idct >= AVDISCARD_ALL)
  2535. goto skip_idct;
  2536. }
  2537. /* add dct residue */
  2538. if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
  2539. || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
  2540. add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2541. add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2542. add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2543. add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2544. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2545. if (s->chroma_y_shift){
  2546. add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2547. add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2548. }else{
  2549. dct_linesize >>= 1;
  2550. dct_offset >>=1;
  2551. add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2552. add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2553. add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2554. add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2555. }
  2556. }
  2557. } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
  2558. add_dct(s, block[0], 0, dest_y , dct_linesize);
  2559. add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
  2560. add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
  2561. add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
  2562. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2563. if(s->chroma_y_shift){//Chroma420
  2564. add_dct(s, block[4], 4, dest_cb, uvlinesize);
  2565. add_dct(s, block[5], 5, dest_cr, uvlinesize);
  2566. }else{
  2567. //chroma422
  2568. dct_linesize = uvlinesize << s->interlaced_dct;
  2569. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
  2570. add_dct(s, block[4], 4, dest_cb, dct_linesize);
  2571. add_dct(s, block[5], 5, dest_cr, dct_linesize);
  2572. add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
  2573. add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
  2574. if(!s->chroma_x_shift){//Chroma444
  2575. add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
  2576. add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
  2577. add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
  2578. add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
  2579. }
  2580. }
}//gray
  2582. }
  2583. else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
  2584. ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
  2585. }
  2586. } else {
  2587. /* dct only in intra block */
  2588. if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
  2589. put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2590. put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2591. put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2592. put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2593. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2594. if(s->chroma_y_shift){
  2595. put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2596. put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2597. }else{
  2598. dct_offset >>=1;
  2599. dct_linesize >>=1;
  2600. put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2601. put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2602. put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2603. put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2604. }
  2605. }
  2606. }else{
  2607. s->dsp.idct_put(dest_y , dct_linesize, block[0]);
  2608. s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
  2609. s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
  2610. s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
  2611. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2612. if(s->chroma_y_shift){
  2613. s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
  2614. s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
  2615. }else{
  2616. dct_linesize = uvlinesize << s->interlaced_dct;
  2617. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
  2618. s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
  2619. s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
  2620. s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
  2621. s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
  2622. if(!s->chroma_x_shift){//Chroma444
  2623. s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
  2624. s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
  2625. s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
  2626. s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
  2627. }
  2628. }
  2629. }//gray
  2630. }
  2631. }
  2632. skip_idct:
  2633. if(!readable){
  2634. s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
  2635. s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
  2636. s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
  2637. }
  2638. }
  2639. }
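/* Dispatch to a compile-time specialized variant of MPV_decode_mb_internal()
 * (lowres x MPEG-1/2); unless CONFIG_SMALL is set, this lets the compiler
 * drop the unused code paths. */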
  2640. void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
  2641. #if !CONFIG_SMALL
  2642. if(s->out_format == FMT_MPEG1) {
  2643. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
  2644. else MPV_decode_mb_internal(s, block, 0, 1);
  2645. } else
  2646. #endif
  2647. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
  2648. else MPV_decode_mb_internal(s, block, 0, 0);
  2649. }
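/* Pass a completed slice of the current picture to the user via the
 * draw_horiz_band callback. */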
  2650. void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
  2651. {
  2652. ff_draw_horiz_band(s->avctx, &s->current_picture_ptr->f,
  2653. &s->last_picture_ptr->f, y, h, s->picture_structure,
  2654. s->first_field, s->low_delay);
  2655. }
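/* Set up s->block_index[] and the s->dest[] pointers for the current
 * macroblock position. */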
  2656. void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
  2657. const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
  2658. const int uvlinesize = s->current_picture.f.linesize[1];
  2659. const int mb_size= 4 - s->avctx->lowres;
  2660. s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
  2661. s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
  2662. s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
  2663. s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
  2664. s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2665. s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2666. //block_index is not used by mpeg2, so it is not affected by chroma_format
  2667. s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
  2668. s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2669. s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2670. if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
  2671. {
  2672. if(s->picture_structure==PICT_FRAME){
  2673. s->dest[0] += s->mb_y * linesize << mb_size;
  2674. s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2675. s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2676. }else{
  2677. s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
  2678. s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2679. s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2680. av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
  2681. }
  2682. }
  2683. }
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable; it is only used to speed the permutation up, the block is not
 * (inverse) permuted to scantable order!
 */
  2692. void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
  2693. {
  2694. int i;
  2695. int16_t temp[64];
  2696. if(last<=0) return;
  2697. //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
  2698. for(i=0; i<=last; i++){
  2699. const int j= scantable[i];
  2700. temp[j]= block[j];
  2701. block[j]=0;
  2702. }
  2703. for(i=0; i<=last; i++){
  2704. const int j= scantable[i];
  2705. const int perm_j= permutation[j];
  2706. block[perm_j]= temp[j];
  2707. }
  2708. }
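/**
 * Release all pictures and reset the parse/bitstream state; used to
 * implement the decoder's flush (e.g. when seeking).
 */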
  2709. void ff_mpeg_flush(AVCodecContext *avctx){
  2710. int i;
  2711. MpegEncContext *s = avctx->priv_data;
  2712. if(s==NULL || s->picture==NULL)
  2713. return;
  2714. for (i = 0; i < MAX_PICTURE_COUNT; i++)
  2715. ff_mpeg_unref_picture(s, &s->picture[i]);
  2716. s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
  2717. ff_mpeg_unref_picture(s, &s->current_picture);
  2718. ff_mpeg_unref_picture(s, &s->last_picture);
  2719. ff_mpeg_unref_picture(s, &s->next_picture);
  2720. s->mb_x= s->mb_y= 0;
  2721. s->closed_gop= 0;
  2722. s->parse_context.state= -1;
  2723. s->parse_context.frame_start_found= 0;
  2724. s->parse_context.overread= 0;
  2725. s->parse_context.overread_index= 0;
  2726. s->parse_context.index= 0;
  2727. s->parse_context.last_index= 0;
  2728. s->bitstream_buffer_size=0;
  2729. s->pp_time=0;
  2730. }
/**
 * Set qscale and update the qscale-dependent variables
 * (chroma_qscale, y_dc_scale, c_dc_scale).
 */
  2734. void ff_set_qscale(MpegEncContext * s, int qscale)
  2735. {
  2736. if (qscale < 1)
  2737. qscale = 1;
  2738. else if (qscale > 31)
  2739. qscale = 31;
  2740. s->qscale = qscale;
  2741. s->chroma_qscale= s->chroma_qscale_table[qscale];
  2742. s->y_dc_scale= s->y_dc_scale_table[ qscale ];
  2743. s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
  2744. }
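/* Report how far decoding of the current picture has progressed to other
 * frame threads; skipped for B-frames (never referenced), partitioned
 * frames and after errors. */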
  2745. void ff_MPV_report_decode_progress(MpegEncContext *s)
  2746. {
  2747. if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
  2748. ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
  2749. }
  2750. #if CONFIG_ERROR_RESILIENCE
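/* Fill an ERPicture with the frame pointers and per-MB metadata of a
 * Picture so the error concealment code can operate on it. */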
  2751. void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
  2752. {
  2753. int i;
  2754. memset(dst, 0, sizeof(*dst));
  2755. if (!src)
  2756. return;
  2757. dst->f = &src->f;
  2758. dst->tf = &src->tf;
  2759. for (i = 0; i < 2; i++) {
  2760. dst->motion_val[i] = src->motion_val[i];
  2761. dst->ref_index[i] = src->ref_index[i];
  2762. }
  2763. dst->mb_type = src->mb_type;
  2764. dst->field_picture = src->field_picture;
  2765. }
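/* Hand the current/last/next pictures and the timing information over to
 * the error resilience context at the start of a frame. */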
  2766. void ff_mpeg_er_frame_start(MpegEncContext *s)
  2767. {
  2768. ERContext *er = &s->er;
  2769. ff_mpeg_set_erpic(&er->cur_pic, s->current_picture_ptr);
  2770. ff_mpeg_set_erpic(&er->next_pic, s->next_picture_ptr);
  2771. ff_mpeg_set_erpic(&er->last_pic, s->last_picture_ptr);
  2772. er->pp_time = s->pp_time;
  2773. er->pb_time = s->pb_time;
  2774. er->quarter_sample = s->quarter_sample;
  2775. er->partitioned_frame = s->partitioned_frame;
  2776. ff_er_frame_start(er);
  2777. }
  2778. #endif /* CONFIG_ERROR_RESILIENCE */