/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/motion_vector.h"
#include "libavutil/video_enc_params.h"

#include "avcodec.h"
#include "blockdsp.h"
#include "h264chroma.h"
#include "idctdsp.h"
#include "internal.h"
#include "mathops.h"
#include "mpeg_er.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodata.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include "thread.h"
#include "wmv2.h"
#include <limits.h>

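/*
 * Inverse quantization ("unquantize") helpers. Each variant undoes the
 * quantizer of one codec family for a single 8x8 block of coefficients:
 *
 *   MPEG-1: level' = (level * qscale * matrix[j]) >> 3 (intra) or
 *           ((2 * level + 1) * qscale * matrix[j]) >> 4 (inter), with the
 *           result forced odd via (x - 1) | 1 ("oddification");
 *   MPEG-2: same scheme with >> 4 (intra) / >> 5 (inter) scaling and no
 *           oddification; see the mismatch control further below;
 *   H.263:  level' = level * 2 * qscale +/- ((qscale - 1) | 1), no matrix.
 *
 * The DC coefficient of intra blocks is scaled separately by
 * y_dc_scale / c_dc_scale.
 */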
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only MPEG-1 */
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
        }
    }
}

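/*
 * Bitexact variant of the function above: in addition it applies the
 * MPEG-2 mismatch control of ISO/IEC 13818-2, folding the parity of the
 * sum of all dequantized coefficients into the LSB of the last coefficient
 * (block[63] ^= sum & 1) so that encoder and decoder IDCTs cannot drift
 * apart.
 */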
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    block[63]^=sum&1;
}

static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
            }
            block[j] = level;
            sum+=level;
        }
    }
    block[63]^=sum&1;
}

static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    }else{
        qadd = 0;
    }
    if(s->ac_pred)
        nCoeffs=63;
    else
        nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];

    for(i=1; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=0; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

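/* Stand-in "motion compensation" routines for the FF_DEBUG_NOMC debug flag:
 * instead of copying from the reference picture they fill the 16x16 / 8x8
 * destination block with mid-gray (128), making it easy to see which parts
 * of a frame are predicted rather than intra coded. */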
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while(h--)
        memset(dst + h*linesize, 128, 16);
}

static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while(h--)
        memset(dst + h*linesize, 128, 8);
}

/* init common dct for both encoder and decoder */
static av_cold int dct_init(MpegEncContext *s)
{
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        for (i=0; i<4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (HAVE_INTRINSICS_NEON)
        ff_mpv_common_init_neon(s);

    if (ARCH_ALPHA)
        ff_mpv_common_init_axp(s);
    if (ARCH_ARM)
        ff_mpv_common_init_arm(s);
    if (ARCH_PPC)
        ff_mpv_common_init_ppc(s);
    if (ARCH_X86)
        ff_mpv_common_init_x86(s);
    if (ARCH_MIPS)
        ff_mpv_common_init_mips(s);

    return 0;
}

av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    if (s->codec_id == AV_CODEC_ID_MPEG4)
        s->idsp.mpeg4_studio_profile = s->studio_profile;
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}

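/* Thin wrapper around ff_alloc_picture(): hands the generic picture
 * allocator the geometry stored in this context (strides, chroma shifts,
 * output format) so all pictures are allocated consistently. */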
static int alloc_picture(MpegEncContext *s, Picture *pic)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}

static int init_duplicate_context(MpegEncContext *s)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    if (s->encoding) {
        if (!FF_ALLOCZ_TYPED_ARRAY(s->me.map,       ME_MAP_SIZE) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->me.score_map, ME_MAP_SIZE))
            return AVERROR(ENOMEM);

        if (s->noise_reduction) {
            if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
                return AVERROR(ENOMEM);
        }
    }
    if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 2))
        return AVERROR(ENOMEM);
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (!(s->block32         = av_mallocz(sizeof(*s->block32))) ||
        !(s->dpcm_macroblock = av_mallocz(sizeof(*s->dpcm_macroblock))))
        return AVERROR(ENOMEM);
    s->dpcm_direction = 0;

    if (s->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
            return AVERROR(ENOMEM);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
}

/**
 * Initialize an MpegEncContext's thread contexts. Presumes that
 * slice_context_count is already set and that all the fields
 * that are freed/reset in free_duplicate_context() are NULL.
 */
static int init_duplicate_contexts(MpegEncContext *s)
{
    int nb_slices = s->slice_context_count, ret;

    /* We initialize the copies before the original so that
     * fields allocated in init_duplicate_context are NULL after
     * copying. This prevents double-frees upon allocation error. */
    for (int i = 1; i < nb_slices; i++) {
        s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
        if (!s->thread_context[i])
            return AVERROR(ENOMEM);
        if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
            return ret;
        s->thread_context[i]->start_mb_y =
            (s->mb_height * (i) + nb_slices / 2) / nb_slices;
        s->thread_context[i]->end_mb_y   =
            (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    }
    s->start_mb_y = 0;
    s->end_mb_y   = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
                                  : s->mb_height;
    return init_duplicate_context(s);
}

static void free_duplicate_context(MpegEncContext *s)
{
    if (!s)
        return;

    av_freep(&s->sc.edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp =
    s->sc.rd_scratchpad =
    s->sc.b_scratchpad =
    s->sc.obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->block32);
    av_freep(&s->dpcm_macroblock);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}

static void free_duplicate_contexts(MpegEncContext *s)
{
    for (int i = 1; i < s->slice_context_count; i++) {
        free_duplicate_context(s->thread_context[i]);
        av_freep(&s->thread_context[i]);
    }
    free_duplicate_context(s);
}

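/* ff_update_duplicate_context() below refreshes a slice-thread context by
 * memcpy'ing the whole MpegEncContext from the source context; the fields
 * listed in COPY() are the per-thread ones (scratch buffers, block
 * pointers, MB row range, bit writer) that must survive the copy, so they
 * are saved into a backup context first and written back afterwards. */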
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(sc.edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(sc.rd_scratchpad);
    COPY(sc.b_scratchpad);
    COPY(sc.obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(block32);
    COPY(dpcm_macroblock);
    COPY(dpcm_direction);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}

int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
    }
    if (!dst->sc.edge_emu_buffer &&
        (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
                                       &dst->sc, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    return 0;
}

int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        int err;
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized){
            // s->picture_range_start += MAX_PICTURE_COUNT;
            // s->picture_range_end   += MAX_PICTURE_COUNT;
            ff_mpv_idct_init(s);
            if((err = ff_mpv_common_init(s)) < 0){
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return err;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->quarter_sample       = s1->quarter_sample;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    if(s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            if (s1->picture && s1->picture[i].f->buf[0] &&
                (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
                return ret;
        }

#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s->avctx, &s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
    else\
        ret = ff_update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);
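
    /* The *_picture_ptr fields of the source context point into the source
     * context's picture[] array; rebase each of them so it points to the
     * picture with the same index in this context (or NULL if it does not
     * belong to the array at all). */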
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->picture &&                   \
      pic < old_ctx->picture + MAX_PICTURE_COUNT) ?       \
        &new_ctx->picture[pic - old_ctx->picture] : NULL)

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG-4 timing info
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
            if (!s->bitstream_buffer) {
                s->bitstream_buffer_size = 0;
                return AVERROR(ENOMEM);
            }
        }
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               AV_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize-dependent scratch buffer allocation
    if (!s->sc.edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
                                        &s->sc, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG-2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
    }

    return 0;
}

/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_mpv_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table     =
    s->c_dc_scale_table     = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table  = ff_default_chroma_qscale_table;
    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    s->picture_structure    = PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number       = 0;

    s->f_code               = 1;
    s->b_code               = 1;

    s->slice_context_count  = 1;
}

/**
 * Initialize the given MpegEncContext for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
{
    ff_mpv_common_defaults(s);

    s->avctx           = avctx;
    s->width           = avctx->coded_width;
    s->height          = avctx->coded_height;
    s->codec_id        = avctx->codec->id;
    s->workaround_bugs = avctx->workaround_bugs;

    /* convert fourcc to upper case */
    s->codec_tag       = avpriv_toupper4(avctx->codec_tag);
}

/**
 * Initialize and allocate MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
        return AVERROR(ENOMEM);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base,            mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base,       mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base,       mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base,     mv_table_size))
            return AVERROR(ENOMEM);
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        /* Allocate MB type table */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type,      mb_array_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
            !FF_ALLOC_TYPED_ARRAY (s->cplx_tab,     mb_array_size) ||
            !FF_ALLOC_TYPED_ARRAY (s->bits_tab,     mb_array_size))
            return AVERROR(ENOMEM);
    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    if (!FF_ALLOCZ_TYPED_ARRAY(s->b_field_mv_table_base[i][j][k], mv_table_size))
                        return AVERROR(ENOMEM);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                if (!FF_ALLOCZ_TYPED_ARRAY(s->b_field_select_table [i][j], mv_table_size * 2) ||
                    !FF_ALLOCZ_TYPED_ARRAY(s->p_field_mv_table_base[i][j], mv_table_size))
                    return AVERROR(ENOMEM);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            if (!FF_ALLOCZ_TYPED_ARRAY(s->p_field_select_table[i], mv_table_size * 2))
                return AVERROR(ENOMEM);
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values, cbp, ac_pred, pred_dir */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->cbp_table,        mb_array_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->pred_dir_table,   mb_array_size))
            return AVERROR(ENOMEM);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
            return AVERROR(ENOMEM);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block, init macroblock skip table */
    if (!FF_ALLOC_TYPED_ARRAY(s->mbintra_table, mb_array_size) ||
        // Note the + 1 is for a quicker MPEG-4 slice_end detection
        !FF_ALLOCZ_TYPED_ARRAY(s->mbskip_table, mb_array_size + 2))
        return AVERROR(ENOMEM);
    memset(s->mbintra_table, 1, mb_array_size);

    return ff_mpeg_er_init(s);
}

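/* Set every pointer and size field of the context to NULL/0 so that
 * ff_mpv_common_end() can safely be called on a partially initialized
 * context, e.g. after an allocation failure in ff_mpv_common_init(). */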
static void clear_context(MpegEncContext *s)
{
    int i, j, k;

    memset(&s->next_picture,    0, sizeof(s->next_picture));
    memset(&s->last_picture,    0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    memset(&s->new_picture,     0, sizeof(s->new_picture));

    memset(s->thread_context, 0, sizeof(s->thread_context));

    s->me.map        = NULL;
    s->me.score_map  = NULL;
    s->dct_error_sum = NULL;
    s->block         = NULL;
    s->blocks        = NULL;
    s->block32       = NULL;
    memset(s->pblocks, 0, sizeof(s->pblocks));
    s->dpcm_direction  = 0;
    s->dpcm_macroblock = NULL;
    s->ac_val_base = NULL;
    s->ac_val[0] =
    s->ac_val[1] =
    s->ac_val[2] = NULL;

    s->sc.edge_emu_buffer = NULL;
    s->me.scratchpad      = NULL;
    s->me.temp            =
    s->sc.rd_scratchpad   =
    s->sc.b_scratchpad    =
    s->sc.obmc_scratchpad = NULL;

    s->bitstream_buffer = NULL;
    s->allocated_bitstream_buffer_size = 0;
    s->picture = NULL;
    s->mb_type = NULL;
    s->p_mv_table_base            = NULL;
    s->b_forw_mv_table_base       = NULL;
    s->b_back_mv_table_base       = NULL;
    s->b_bidir_forw_mv_table_base = NULL;
    s->b_bidir_back_mv_table_base = NULL;
    s->b_direct_mv_table_base     = NULL;
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                s->b_field_mv_table_base[i][j][k] = NULL;
                s->b_field_mv_table[i][j][k]      = NULL;
            }
            s->b_field_select_table[i][j] = NULL;
            s->p_field_mv_table_base[i][j] = NULL;
            s->p_field_mv_table[i][j]      = NULL;
        }
        s->p_field_select_table[i] = NULL;
    }

    s->dc_val_base      = NULL;
    s->coded_block_base = NULL;
    s->mbintra_table    = NULL;
    s->cbp_table        = NULL;
    s->pred_dir_table   = NULL;
    s->mbskip_table     = NULL;

    s->er.error_status_table = NULL;
    s->er.er_temp_buffer     = NULL;
    s->mb_index2xy           = NULL;
    s->lambda_table          = NULL;

    s->cplx_tab = NULL;
    s->bits_tab = NULL;
}

/**
 * Init common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 */
av_cold int ff_mpv_common_init(MpegEncContext *s)
{
    int i, ret;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    clear_context(s);

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return AVERROR(EINVAL);
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR(EINVAL);

    dct_init(s);

    /* set chroma shifts */
    ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (ret)
        return ret;

    if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
        return AVERROR(ENOMEM);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].f = av_frame_alloc();
        if (!s->picture[i].f)
            goto fail_nomem;
    }

    if (!(s->next_picture.f    = av_frame_alloc()) ||
        !(s->last_picture.f    = av_frame_alloc()) ||
        !(s->current_picture.f = av_frame_alloc()) ||
        !(s->new_picture.f     = av_frame_alloc()))
        goto fail_nomem;

    if ((ret = init_context_frame(s)))
        goto fail;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;
    s->slice_context_count = nb_slices;

    // if (s->width && s->height) {
    ret = init_duplicate_contexts(s);
    if (ret < 0)
        goto fail;
    // }

    return 0;
 fail_nomem:
    ret = AVERROR(ENOMEM);
 fail:
    ff_mpv_common_end(s);
    return ret;
}

/**
 * Frees and resets MpegEncContext fields depending on the resolution,
 * as well as the slice thread contexts.
 * It is used during resolution changes to avoid a full reinitialization of
 * the codec.
 */
static void free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    free_duplicate_contexts(s);

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);
    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;
}

int ff_mpv_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    if (!s->context_initialized)
        return AVERROR(EINVAL);

    free_context_frame(s);

    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;

    /* set chroma shifts */
    err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (err < 0)
        goto fail;

    if ((err = init_context_frame(s)))
        goto fail;

    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0] = s;

    if (s->width && s->height) {
        err = init_duplicate_contexts(s);
        if (err < 0)
            goto fail;
    }
    s->context_reinit = 0;

    return 0;
 fail:
    free_context_frame(s);
    s->context_reinit = 1;
    return err;
}

/* free the common structure of both encoder and decoder */
void ff_mpv_common_end(MpegEncContext *s)
{
    int i;

    if (!s)
        return;

    free_context_frame(s);
    if (s->slice_context_count > 1)
        s->slice_context_count = 1;

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    if (!s->avctx)
        return;

    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            av_frame_free(&s->picture[i].f);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    av_frame_free(&s->current_picture.f);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);
    av_frame_free(&s->next_picture.f);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);
    av_frame_free(&s->new_picture.f);

    s->context_initialized = 0;
    s->context_reinit      = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}

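/* Paint all three planes of a frame mid-gray (0x80). Used together with
 * the FF_DEBUG_NOMC debug flag to blank out motion-compensated content. */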
static void gray_frame(AVFrame *frame)
{
    int i, h_chroma_shift, v_chroma_shift;

    av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);

    for(i=0; i<frame->height; i++)
        memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
    for(i=0; i<AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
        memset(frame->data[1] + frame->linesize[1]*i,
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
        memset(frame->data[2] + frame->linesize[2]*i,
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
    }
}

/**
 * Generic function called after decoding
 * the header and before a frame is decoded.
 */
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (MPEG-124 / H.263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    /* release non reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    }

    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        // we already have an unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        pic = &s->picture[i];
    }

    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

    pic->f->coded_picture_number = s->coded_picture_number++;

    if (alloc_picture(s, pic) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f->top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f->top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                  !s->progressive_sequence;
    s->current_picture_ptr->field_picture       = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr, s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
            av_log(avctx, AV_LOG_DEBUG,
                   "allocating dummy last picture for B frame\n");
        else if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference    = 3;
        s->last_picture_ptr->f->key_frame = 0;
        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->last_picture_ptr) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        if (!avctx->hwaccel) {
            for(i=0; i<avctx->height; i++)
                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
                       0x80, avctx->width);
            if (s->last_picture_ptr->f->data[2]) {
                for(i=0; i<AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
                    memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                    memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                }
            }

            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
                for(i=0; i<avctx->height; i++)
                    memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
            }
        }

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference    = 3;
        s->next_picture_ptr->f->key_frame = 0;
        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->next_picture_ptr) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

#if 0 // BUFREF-FIXME
    memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
    memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
#endif
    if (s->last_picture_ptr) {
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                     s->last_picture_ptr->f->buf[0]));

    if (s->picture_structure != PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    /* set dequantizer, we can't do it during init as
     * it might change for MPEG-4 and we can't do it in the header
     * decode as init is not called for MPEG-4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        gray_frame(s->current_picture_ptr->f);
    }

    return 0;
}

/* called after a frame has been decoded. */
void ff_mpv_frame_end(MpegEncContext *s)
{
    emms_c();

    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}

void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val, &s->low_delay,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}

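/* Export the per-macroblock quantizer values of a decoded picture as
 * AVVideoEncParams side data on the output frame, one 16x16 block per MB.
 * For FF_QSCALE_TYPE_MPEG1 the stored qscale is doubled on export. */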
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
{
    AVVideoEncParams *par;
    int mult = (qp_type == FF_QSCALE_TYPE_MPEG1) ? 2 : 1;
    unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;
    unsigned int x, y;

    if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
        return 0;

    par = av_video_enc_params_create_side_data(f, AV_VIDEO_ENC_PARAMS_MPEG2, nb_mb);
    if (!par)
        return AVERROR(ENOMEM);

    for (y = 0; y < p->alloc_mb_height; y++)
        for (x = 0; x < p->alloc_mb_width; x++) {
            const unsigned int block_idx = y * p->alloc_mb_width + x;
            const unsigned int     mb_xy = y * p->alloc_mb_stride + x;
            AVVideoBlockParams *b = av_video_enc_params_block(par, block_idx);

            b->src_x = x * 16;
            b->src_y = y * 16;
            b->w     = 16;
            b->h     = 16;

            b->delta_qp = p->qscale_table[mb_xy] * mult;
        }

    return 0;
}

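/*
 * Low-resolution (lowres) motion compensation helpers. With lowres = n the
 * picture is decoded at 1/2^n of its nominal size while the bitstream's
 * motion vectors keep their full-resolution precision, so the extra bits
 * become sub-pel positions: s_mask = (2 << lowres) - 1 extracts the
 * fractional part, and (sx << 2) >> lowres rescales it to the 1/8-pel
 * grid expected by the h264_chroma_mc_func interpolators.
 */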
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, ptrdiff_t stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 3);
    const int s_mask   = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y << field_based,
                                 h_edge_pos, v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}

/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
    const int block_s    = 8>>lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
                //Chroma422
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                //Chroma444
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}

static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}

/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * The motion vectors are taken from s->mv and the MV type from s->mv_type.
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >> lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;
            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
/**
 * find the lowest MB row referenced in the MVs
 */
static int lowest_referenced_row(MpegEncContext *s, int dir)
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
unhandled:
    return s->mb_height - 1;
}
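/* Example of the rounding above: half-pel MVs (quarter_sample == 0) are
 * first shifted up to quarter-pel units (qpel_shift == 1); one MB row is
 * 16 pixels == 64 quarter-pels, so "(... + 63) >> 6" is a divide by 64
 * that rounds up, e.g. my = 33 half-pel -> (66 + 63) >> 6 = 2 rows. */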
/* put block[] to dest[] */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}

/* add block[] to dest[] */
static inline void add_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size)
{
    if (s->block_last_index[i] >= 0) {
        s->idsp.idct_add(dest, line_size, block);
    }
}

static inline void add_dequant_dct(MpegEncContext *s,
                                   int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    if (s->block_last_index[i] >= 0) {
        s->dct_unquantize_inter(s, block, i, qscale);
        s->idsp.idct_add(dest, line_size, block);
    }
}
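/* In short: intra blocks overwrite the destination,
 *     dest  = idct(dequant(block)),
 * while inter blocks add the residue on top of the motion-compensated
 * prediction,
 *     dest += idct(dequant(block));
 * block_last_index < 0 means the block has no coded coefficients, so
 * there is nothing to add. */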
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy     + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version >= 3) {
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy     + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy] = 0;
}
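/* 1024 is the neutral DC predictor reset value used by H.263/MPEG-4 style
 * DC prediction, so a later intra MB next to this one predicts from a known
 * constant rather than from stale coefficients of the non-intra MB. */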
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir   : motion vector direction
   s->mv_type  : motion vector type
   s->mv       : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
static av_always_inline
void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
                                 int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    if (CONFIG_XVMC &&
        s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
        s->avctx->hwaccel->decode_mb(s); // xvmc uses pblocks
        return;
    }

    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        int i, j;
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (i = 0; i < 6; i++) {
            for (j = 0; j < 64; j++) {
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if (s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    } else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy] = 1;

    if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
        !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
          s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f->linesize[0]; // not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f->linesize[1];
        const int readable   = s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size = lowres_flag ? 8 >> s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if (!s->encoding) {
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped = 0;
                av_assert2(s->pict_type != AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if (!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else {
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
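        /* With interlaced DCT each 8x8 block holds lines of one field, so
         * consecutive block lines are 2*linesize apart and the lower block
         * of a vertical pair starts only one frame line down; in the
         * progressive case it starts block_size lines down. */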
        if (readable) {
            dest_y  = s->dest[0];
            dest_cb = s->dest[1];
            dest_cr = s->dest[2];
        } else {
            dest_y  = s->sc.b_scratchpad;
            dest_cb = s->sc.b_scratchpad + 16 * linesize;
            dest_cr = s->sc.b_scratchpad + 32 * linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if (!s->encoding) {
                if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                if (lowres_flag) {
                    h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
                        op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
                    }
                } else {
                    op_qpix = s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
                        op_pix = s->hdsp.put_pixels_tab;
                    } else {
                        op_pix = s->hdsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                        op_pix  = s->hdsp.avg_pixels_tab;
                        op_qpix = s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if (s->avctx->skip_idct) {
                if (   (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                    || (s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                    ||  s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }
            /* add dct residue */
            if (s->encoding || !(s->msmpeg4_version || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                                 || (s->codec_id == AV_CODEC_ID_MPEG4 && !s->mpeg_quant))) {
                add_dequant_dct(s, block[0], 0, dest_y,                           dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset,              dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) {
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    } else {
                        dct_linesize >>= 1;
                        dct_offset   >>= 1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if (is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)) {
                add_dct(s, block[0], 0, dest_y,                           dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset,              dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) { //Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    } else {
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize);
                        if (!s->chroma_x_shift) { //Chroma444
                            add_dct(s, block[8],  8,  dest_cb + block_size, dct_linesize);
                            add_dct(s, block[9],  9,  dest_cr + block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb + block_size + dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr + block_size + dct_offset, dct_linesize);
                        }
                    }
                } //fi gray
            } else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
               TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
            if (s->avctx->bits_per_raw_sample > 8) {
                const int act_block_size = block_size * 2;

                if (s->dpcm_direction == 0) {
                    s->idsp.idct_put(dest_y,                               dct_linesize, (int16_t*)(*s->block32)[0]);
                    s->idsp.idct_put(dest_y              + act_block_size, dct_linesize, (int16_t*)(*s->block32)[1]);
                    s->idsp.idct_put(dest_y + dct_offset,                  dct_linesize, (int16_t*)(*s->block32)[2]);
                    s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);

                    dct_linesize = uvlinesize << s->interlaced_dct;
                    dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * block_size;

                    s->idsp.idct_put(dest_cb,              dct_linesize, (int16_t*)(*s->block32)[4]);
                    s->idsp.idct_put(dest_cr,              dct_linesize, (int16_t*)(*s->block32)[5]);
                    s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)(*s->block32)[6]);
                    s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)(*s->block32)[7]);
                    if (!s->chroma_x_shift) { //Chroma444
                        s->idsp.idct_put(dest_cb + act_block_size,              dct_linesize, (int16_t*)(*s->block32)[8]);
                        s->idsp.idct_put(dest_cr + act_block_size,              dct_linesize, (int16_t*)(*s->block32)[9]);
                        s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
                        s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
                    }
                } else if (s->dpcm_direction == 1) {
                    int i, w, h;
                    uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
                    int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
                    for (i = 0; i < 3; i++) {
                        int idx = 0;
                        int vsub = i ? s->chroma_y_shift : 0;
                        int hsub = i ? s->chroma_x_shift : 0;
                        for (h = 0; h < (16 >> vsub); h++) {
                            for (w = 0; w < (16 >> hsub); w++)
                                dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
                            dest_pcm[i] += linesize[i] / 2;
                        }
                    }
                } else if (s->dpcm_direction == -1) {
                    int i, w, h;
                    uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
                    int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
                    for (i = 0; i < 3; i++) {
                        int idx = 0;
                        int vsub = i ? s->chroma_y_shift : 0;
                        int hsub = i ? s->chroma_x_shift : 0;
                        dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
                        for (h = (16 >> vsub) - 1; h >= 1; h--) {
                            for (w = (16 >> hsub) - 1; w >= 1; w--)
                                dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
                            dest_pcm[i] -= linesize[i] / 2;
                        }
                    }
                }
            }
            /* dct only in intra block */
            else if (s->encoding || !(s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO)) {
                put_dct(s, block[0], 0, dest_y,                           dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset,              dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) {
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    } else {
                        dct_offset   >>= 1;
                        dct_linesize >>= 1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else {
                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) {
                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                    } else {
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * block_size;

                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if (!s->chroma_x_shift) { //Chroma444
                            s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                } //gray
            }
        }
skip_idct:
        if (!readable) {
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y, linesize, 16);
            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize, 16 >> s->chroma_y_shift);
                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize, 16 >> s->chroma_y_shift);
            }
        }
    }
}
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1) {
        if (s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
        else                  mpv_reconstruct_mb_internal(s, block, 0, 1);
    } else
#endif
    if (s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
    else                  mpv_reconstruct_mb_internal(s, block, 0, 0);
}
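/* Because mpv_reconstruct_mb_internal() is av_always_inline and is called
 * here with compile-time-constant lowres_flag/is_mpeg12 arguments, the
 * compiler emits specialized bodies with the dead branches removed; with
 * CONFIG_SMALL the is_mpeg12 specializations are skipped to save code size. */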
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
{
    ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
                       s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
                       s->first_field, s->low_delay);
}
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f->linesize[1];
    const int width_of_mb  = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
    const int height_of_mb =  4 - s->avctx->lowres;

    s->block_index[0] = s->b8_stride * (s->mb_y * 2    ) - 2 + s->mb_x * 2;
    s->block_index[1] = s->b8_stride * (s->mb_y * 2    ) - 1 + s->mb_x * 2;
    s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1) - 2 + s->mb_x * 2;
    s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) - 1 + s->mb_x * 2;
    s->block_index[4] = s->mb_stride * (s->mb_y + 1)                + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    s->block_index[5] = s->mb_stride * (s->mb_y + s->mb_height + 2) + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) <<  width_of_mb);
    s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));

    if (!(s->pict_type == AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure == PICT_FRAME))
    {
        if (s->picture_structure == PICT_FRAME) {
            s->dest[0] += s->mb_y *   linesize <<  height_of_mb;
            s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
        } else {
            s->dest[0] += (s->mb_y >> 1) *   linesize <<  height_of_mb;
            s->dest[1] += (s->mb_y >> 1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
            s->dest[2] += (s->mb_y >> 1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
            av_assert1((s->mb_y & 1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
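/* Worked example of the dest[] arithmetic above: for 8-bit video at full
 * resolution width_of_mb is 4, so each macroblock column advances the luma
 * pointer by 1 << 4 = 16 bytes; with lowres=1 it is 1 << 3 = 8 bytes, and
 * >8-bit samples (2 bytes each) add one to the shift. */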
void ff_mpeg_flush(AVCodecContext *avctx){
    int i;
    MpegEncContext *s = avctx->priv_data;

    if (!s || !s->picture)
        return;

    for (i = 0; i < MAX_PICTURE_COUNT; i++)
        ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    s->mb_x = s->mb_y = 0;
    s->closed_gop = 0;

    s->parse_context.state = -1;
    s->parse_context.frame_start_found = 0;
    s->parse_context.overread = 0;
    s->parse_context.overread_index = 0;
    s->parse_context.index = 0;
    s->parse_context.last_index = 0;
    s->bitstream_buffer_size = 0;
    s->pp_time = 0;
}
/**
 * set qscale and update qscale dependent variables.
 */
void ff_set_qscale(MpegEncContext * s, int qscale)
{
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;

    s->qscale = qscale;
    s->chroma_qscale = s->chroma_qscale_table[qscale];

    s->y_dc_scale = s->y_dc_scale_table[qscale];
    s->c_dc_scale = s->c_dc_scale_table[s->chroma_qscale];
}
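/* The table indirection above lets a codec install its own mappings: a
 * nonlinear chroma_qscale_table (e.g. the H.263-style one) quantizes chroma
 * more gently than luma at high qscale, while the DC scale tables give the
 * per-qscale divisor for the intra DC coefficient (exact tables vary per
 * codec). */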
void ff_mpv_report_decode_progress(MpegEncContext *s)
{
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
        ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
}
  2055. }